kernel/eka/memmodel/epoc/mmubase/mmubase.cpp
       
     1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32\memmodel\epoc\mmubase\mmubase.cpp
       
    15 // 
       
    16 //
       
    17 
       
    18 #include <memmodel/epoc/mmubase/mmubase.h>
       
    19 #include <mmubase.inl>
       
    20 #include <ramcache.h>
       
    21 #include <demand_paging.h>
       
    22 #include "cache_maintenance.h"
       
    23 #include "highrestimer.h"
       
    24 #include <defrag.h>
       
    25 #include <ramalloc.h>
       
    26 
       
    27 
       
    28 __ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));
       
    29 
       
    30 _LIT(KLitRamAlloc,"RamAlloc");
       
    31 _LIT(KLitHwChunk,"HwChunk");
       
    32 
       
    33 
       
    34 DMutex* MmuBase::HwChunkMutex;
       
    35 DMutex* MmuBase::RamAllocatorMutex;
       
    36 #ifdef BTRACE_KERNEL_MEMORY
       
    37 TInt   Epoc::DriverAllocdPhysRam = 0;
       
    38 TInt   Epoc::KernelMiscPages = 0;
       
    39 #endif
       
    40 
       
    41 /******************************************************************************
       
    42  * Code common to all MMU memory models
       
    43  ******************************************************************************/
       
    44 
       
    45 const TInt KFreePagesStepSize=16;
       
    46 
       
    47 void MmuBase::Panic(TPanic aPanic)
       
    48 	{
       
    49 	Kern::Fault("MMUBASE",aPanic);
       
    50 	}
       
    51 
       
    52 void SPageInfo::Lock()
       
    53 	{
       
    54 	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Lock");
       
    55 	++iLockCount;
       
    56 	if(!iLockCount)
       
    57 		MmuBase::Panic(MmuBase::EPageLockedTooManyTimes);
       
    58 	}
       
    59 
       
    60 TInt SPageInfo::Unlock()
       
    61 	{
       
    62 	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Unlock");
       
    63 	if(!iLockCount)
       
    64 		MmuBase::Panic(MmuBase::EPageUnlockedTooManyTimes);
       
    65 	return --iLockCount;
       
    66 	}
       
    67 
       
    68 #ifdef _DEBUG
       
    69 void SPageInfo::Set(TType aType, TAny* aOwner, TUint32 aOffset)
       
    70 	{
       
    71 	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Set");
       
    72 	(TUint16&)iType = aType; // also sets iState to EStateNormal
       
    73 	
       
    74 	iOwner = aOwner;
       
    75 	iOffset = aOffset;
       
    76 	iModifier = 0;
       
    77 	}
       
    78 
       
    79 void SPageInfo::Change(TType aType,TState aState)
       
    80 	{
       
    81 	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Change");
       
    82 	iType = aType;
       
    83 	iState = aState;
       
    84 	iModifier = 0;
       
    85 	}
       
    86 
       
    87 void SPageInfo::SetState(TState aState)
       
    88 	{
       
    89 	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetState");
       
    90 	iState = aState;
       
    91 	iModifier = 0;
       
    92 	}
       
    93 
       
    94 void SPageInfo::SetModifier(TAny* aModifier)
       
    95 	{
       
    96 	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetModifier");
       
    97 	iModifier = aModifier;
       
    98 	}
       
    99 
       
   100 TInt SPageInfo::CheckModified(TAny* aModifier)
       
   101 	{
       
   102 	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::CheckModified");
       
   103 	return iModifier!=aModifier;
       
   104 	}
       
   105 
       
   106 void SPageInfo::SetZone(TUint8 aZoneIndex)
       
   107 	{
       
   108 	__ASSERT_ALWAYS(K::Initialising,Kern::Fault("SPageInfo::SetZone",0));
       
   109 	iZone = aZoneIndex;
       
   110 	}
       
   111 
       
   112 
       
   113 #endif
       
   114 
       
   115 MmuBase::MmuBase()
       
   116 	: iRamCache(NULL), iDefrag(NULL)
       
   117 	{
       
   118 	}
       
   119 
       
   120 TUint32 MmuBase::RoundToPageSize(TUint32 aSize)
       
   121 	{
       
   122 	return (aSize+KPageMask)&~KPageMask;
       
   123 	}
       
   124 
       
   125 TUint32 MmuBase::RoundToChunkSize(TUint32 aSize)
       
   126 	{
       
   127 	TUint32 mask=TheMmu->iChunkMask;
       
   128 	return (aSize+mask)&~mask;
       
   129 	}
       
   130 
       
   131 TInt MmuBase::RoundUpRangeToPageSize(TUint32& aBase, TUint32& aSize)
       
   132 	{
       
   133 	TUint32 mask=KPageMask;
       
   134 	TUint32 shift=KPageShift;
       
   135 	TUint32 offset=aBase&mask;
       
   136 	aBase&=~mask;
       
   137 	aSize=(aSize+offset+mask)&~mask;
       
   138 	return TInt(aSize>>shift);
       
   139 	}
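// Worked example (assuming 4KB pages, i.e. KPageShift==12): aBase=0x80001234,
// aSize=0x2000 becomes aBase=0x80001000, aSize=0x3000 and the function returns 3,
// the number of pages spanned by the original range.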
       
   140 
       
   141 void MmuBase::Wait()
       
   142 	{
       
   143 	Kern::MutexWait(*RamAllocatorMutex);
       
   144 	if (RamAllocatorMutex->iHoldCount==1)
       
   145 		{
       
   146 		MmuBase& m=*TheMmu;
       
   147 		m.iInitialFreeMemory=Kern::FreeRamInBytes();
       
   148 		m.iAllocFailed=EFalse;
       
   149 		}
       
   150 	}
       
   151 
       
   152 void MmuBase::Signal()
       
   153 	{
       
   154 	if (RamAllocatorMutex->iHoldCount>1)
       
   155 		{
       
   156 		Kern::MutexSignal(*RamAllocatorMutex);
       
   157 		return;
       
   158 		}
       
   159 	MmuBase& m=*TheMmu;
       
   160 	TInt initial=m.iInitialFreeMemory;
       
   161 	TBool failed=m.iAllocFailed;
       
   162 	TInt final=Kern::FreeRamInBytes();
       
   163 	Kern::MutexSignal(*RamAllocatorMutex);
       
   164 	K::CheckFreeMemoryLevel(initial,final,failed);
       
   165 	}
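// Illustrative calling pattern (a sketch modelled on Init3() below, not a new API):
// allocation paths take the RAM allocator mutex with Wait() and release it with
// Signal(); the outermost Signal() re-checks the free memory level.
//
//   MmuBase::Wait();
//   TPhysAddr pa;
//   TInt r = MmuBase::TheMmu->AllocPhysicalRam(KPageSize, pa);
//   MmuBase::Signal();	// runs K::CheckFreeMemoryLevel() on the outermost release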
       
   166 
       
   167 void MmuBase::WaitHwChunk()
       
   168 	{
       
   169 	Kern::MutexWait(*HwChunkMutex);
       
   170 	}
       
   171 
       
   172 void MmuBase::SignalHwChunk()
       
   173 	{
       
   174 	Kern::MutexSignal(*HwChunkMutex);
       
   175 	}
       
   176 
       
   177 
       
   178 void MmuBase::MapRamPage(TLinAddr aAddr, TPhysAddr aPage, TPte aPtePerm)
       
   179 	{
       
   180 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapRamPage %08x@%08x perm %08x", aPage, aAddr, aPtePerm));
       
   181 	TInt ptid=PageTableId(aAddr);
       
   182 	NKern::LockSystem();
       
   183 	MapRamPages(ptid,SPageInfo::EInvalid,0,aAddr,&aPage,1,aPtePerm);
       
   184 	NKern::UnlockSystem();
       
   185 	}
       
   186 
       
   187 //
       
   188 // Unmap and free pages from a global area
       
   189 //
       
   190 void MmuBase::UnmapAndFree(TLinAddr aAddr, TInt aNumPages)
       
   191 	{
       
   192 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::UnmapAndFree(%08x,%d)",aAddr,aNumPages));
       
   193 	while(aNumPages)
       
   194 		{
       
   195 		TInt pt_np=(iChunkSize-(aAddr&iChunkMask))>>iPageShift;
       
   196 		TInt np=Min(aNumPages,pt_np);
       
   197 		aNumPages-=np;
       
   198 		TInt id=PageTableId(aAddr);
       
   199 		if (id>=0)
       
   200 			{
       
   201 			while(np)
       
   202 				{
       
   203 				TInt np2=Min(np,KFreePagesStepSize);
       
   204 				TPhysAddr phys[KFreePagesStepSize];
       
   205 				TInt nptes;
       
   206 				TInt nfree;
       
   207 				NKern::LockSystem();
       
   208 				UnmapPages(id,aAddr,np2,phys,true,nptes,nfree,NULL);
       
   209 				NKern::UnlockSystem();
       
   210 				if (nfree)
       
   211 					{
       
   212 					if (iDecommitThreshold)
       
   213 						CacheMaintenanceOnDecommit(phys, nfree);
       
   214 					iRamPageAllocator->FreeRamPages(phys,nfree,EPageFixed);
       
   215 					}
       
   216 				np-=np2;
       
   217 				aAddr+=(np2<<iPageShift);
       
   218 				}
       
   219 			}
       
   220 		else
       
   221 			{
       
   222 			aAddr+=(np<<iPageShift);
       
   223 			}
       
   224 		}
       
   225 	}
       
   226 
       
   227 void MmuBase::FreePages(TPhysAddr* aPageList, TInt aCount, TZonePageType aPageType)
       
   228 	{
       
   229 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePages(%08x,%d)",aPageList,aCount));
       
   230 	if (!aCount)
       
   231 		return;
       
   232 	TBool sync_decommit = (TUint(aCount)<iDecommitThreshold);
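	// If the batch is smaller than iDecommitThreshold, cache maintenance is done per
	// page inside the loop (releasing the system lock around each clean); otherwise
	// the loop only flashes the system lock and the whole physical cache is
	// synchronised once after it. A threshold of zero means no maintenance is needed.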
       
   233 	TPhysAddr* ppa=aPageList;
       
   234 	TPhysAddr* ppaE=ppa+aCount;
       
   235 	NKern::LockSystem();
       
   236 	while (ppa<ppaE)
       
   237 		{
       
   238 		TPhysAddr pa=*ppa++;
       
   239 		SPageInfo* pi=SPageInfo::SafeFromPhysAddr(pa);
       
   240 		if (pi)
       
   241 			{
       
   242 			pi->SetUnused();
       
   243 			if (pi->LockCount())
       
   244 				ppa[-1]=KPhysAddrInvalid;	// don't free page if it's locked down
       
   245 			else if (sync_decommit)
       
   246 				{
       
   247 				NKern::UnlockSystem();
       
   248 				CacheMaintenanceOnDecommit(pa);
       
   249 				NKern::LockSystem();
       
   250 				}
       
   251 			}
       
   252 		if (!sync_decommit)
       
   253 			NKern::FlashSystem();
       
   254 		}
       
   255 	NKern::UnlockSystem();
       
   256 	if (iDecommitThreshold && !sync_decommit)
       
   257 		CacheMaintenance::SyncPhysicalCache_All();
       
   258 	iRamPageAllocator->FreeRamPages(aPageList,aCount, aPageType);
       
   259 	}
       
   260 
       
   261 TInt MmuBase::InitPageTableInfo(TInt aId)
       
   262 	{
       
   263 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::InitPageTableInfo(%x)",aId));
       
   264 	TInt ptb=aId>>iPtBlockShift;
       
   265 	if (++iPtBlockCount[ptb]==1)
       
   266 		{
       
   267 		// expand page table info array
       
   268 		TPhysAddr pagePhys;
       
   269 		if (AllocRamPages(&pagePhys,1, EPageFixed)!=KErrNone)
       
   270 			{
       
   271 			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
       
   272 			iPtBlockCount[ptb]=0;
       
   273 			iAllocFailed=ETrue;
       
   274 			return KErrNoMemory;
       
   275 			}
       
   276 #ifdef BTRACE_KERNEL_MEMORY
       
   277 		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
       
   278 		++Epoc::KernelMiscPages;
       
   279 #endif
       
   280 		TLinAddr pil=PtInfoBlockLinAddr(ptb);
       
   281 		NKern::LockSystem();
       
   282 		SPageInfo::FromPhysAddr(pagePhys)->SetPtInfo(ptb);
       
   283 		NKern::UnlockSystem();
       
   284 		MapRamPage(pil, pagePhys, iPtInfoPtePerm);
       
   285 		memclr((TAny*)pil, iPageSize);
       
   286 		}
       
   287 	return KErrNone;
       
   288 	}
       
   289 
       
   290 TInt MmuBase::DoAllocPageTable(TPhysAddr& aPhysAddr)
       
   291 //
       
   292 // Allocate a new page table but don't map it.
       
   293 // Return page table id and page number/phys address of new page if any.
       
   294 //
       
   295 	{
       
   296 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoAllocPageTable()"));
       
   297 #ifdef _DEBUG
       
   298 	if(K::CheckForSimulatedAllocFail())
       
   299 		return KErrNoMemory;
       
   300 #endif
       
   301 	TInt id=iPageTableAllocator?iPageTableAllocator->Alloc():-1;
       
   302 	if (id<0)
       
   303 		{
       
   304 		// need to allocate a new page
       
   305 		if (AllocRamPages(&aPhysAddr,1, EPageFixed)!=KErrNone)
       
   306 			{
       
   307 			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
       
   308 			iAllocFailed=ETrue;
       
   309 			return KErrNoMemory;
       
   310 			}
       
   311 
       
   312 		// allocate an ID for the new page
       
   313 		id=iPageTableLinearAllocator->Alloc();
       
   314 		if (id>=0)
       
   315 			{
       
   316 			id<<=iPtClusterShift;
       
   317 			__KTRACE_OPT(KMMU,Kern::Printf("Allocated ID %04x",id));
       
   318 			}
       
   319 		if (id<0 || InitPageTableInfo(id)!=KErrNone)
       
   320 			{
       
   321 			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page table info"));
       
   322 			iPageTableLinearAllocator->Free(id>>iPtClusterShift);
       
   323 			if (iDecommitThreshold)
       
   324 				CacheMaintenanceOnDecommit(aPhysAddr);
       
   325 
       
   326 			iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
       
   327 			iAllocFailed=ETrue;
       
   328 			return KErrNoMemory;
       
   329 			}
       
   330 
       
   331 		// Set up page info for new page
       
   332 		NKern::LockSystem();
       
   333 		SPageInfo::FromPhysAddr(aPhysAddr)->SetPageTable(id>>iPtClusterShift);
       
   334 		NKern::UnlockSystem();
       
   335 #ifdef BTRACE_KERNEL_MEMORY
       
   336 		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
       
   337 		++Epoc::KernelMiscPages;
       
   338 #endif
       
   339 		// mark all subpages other than first as free for use as page tables
       
   340 		if (iPtClusterSize>1)
       
   341 			iPageTableAllocator->Free(id+1,iPtClusterSize-1);
       
   342 		}
       
   343 	else
       
   344 		aPhysAddr=KPhysAddrInvalid;
       
   345 
       
   346 	__KTRACE_OPT(KMMU,Kern::Printf("DoAllocPageTable returns %d (%08x)",id,aPhysAddr));
       
   347 	PtInfo(id).SetUnused();
       
   348 	return id;
       
   349 	}
       
   350 
       
   351 TInt MmuBase::MapPageTable(TInt aId, TPhysAddr aPhysAddr, TBool aAllowExpand)
       
   352 	{
       
   353 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapPageTable(%d,%08x)",aId,aPhysAddr));
       
   354 	TLinAddr ptLin=PageTableLinAddr(aId);
       
   355 	TInt ptg=aId>>iPtGroupShift;
       
   356 	if (++iPtGroupCount[ptg]==1)
       
   357 		{
       
   358 		// need to allocate a new page table
       
   359 		__ASSERT_ALWAYS(aAllowExpand, Panic(EMapPageTableBadExpand));
       
   360 		TPhysAddr xptPhys;
       
   361 		TInt xptid=DoAllocPageTable(xptPhys);
       
   362 		if (xptid<0)
       
   363 			{
       
   364 			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate extra page table"));
       
   365 			iPtGroupCount[ptg]=0;
       
   366 			return KErrNoMemory;
       
   367 			}
       
   368 		if (xptPhys==KPhysAddrInvalid)
       
   369 			xptPhys=aPhysAddr + ((xptid-aId)<<iPageTableShift);
       
   370 		BootstrapPageTable(xptid, xptPhys, aId, aPhysAddr);	// initialise XPT and map it
       
   371 		}
       
   372 	else
       
   373 		MapRamPage(ptLin, aPhysAddr, iPtPtePerm);
       
   374 	return KErrNone;
       
   375 	}
       
   376 
       
   377 TInt MmuBase::AllocPageTable()
       
   378 //
       
   379 // Allocate a new page table, mapped at the correct linear address.
       
   380 // Clear all entries to Not Present. Return page table id.
       
   381 //
       
   382 	{
       
   383 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::AllocPageTable()"));
       
   384 	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
       
   385 
       
   386 	TPhysAddr ptPhys;
       
   387 	TInt id=DoAllocPageTable(ptPhys);
       
   388 	if (id<0)
       
   389 		return KErrNoMemory;
       
   390 	if (ptPhys!=KPhysAddrInvalid)
       
   391 		{
       
   392 		TInt r=MapPageTable(id,ptPhys);
       
   393 		if (r!=KErrNone)
       
   394 			{
       
   395 			DoFreePageTable(id);
       
   396 			SPageInfo* pi=SPageInfo::FromPhysAddr(ptPhys);
       
   397 			NKern::LockSystem();
       
   398 			pi->SetUnused();
       
   399 			NKern::UnlockSystem();
       
   400 			if (iDecommitThreshold)
       
   401 				CacheMaintenanceOnDecommit(ptPhys);
       
   402 
       
   403 			iRamPageAllocator->FreeRamPage(ptPhys, EPageFixed);
       
   404 			return r;
       
   405 			}
       
   406 		}
       
   407 	ClearPageTable(id);
       
   408 	__KTRACE_OPT(KMMU,Kern::Printf("AllocPageTable returns %d",id));
       
   409 	return id;
       
   410 	}
       
   411 
       
   412 TBool MmuBase::DoFreePageTable(TInt aId)
       
   413 //
       
   414 // Free an empty page table. We assume that all pages mapped by the page table have
       
   415 // already been unmapped and freed.
       
   416 //
       
   417 	{
       
   418 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoFreePageTable(%d)",aId));
       
   419 	SPageTableInfo& s=PtInfo(aId);
       
   420 	__NK_ASSERT_DEBUG(!s.iCount); // shouldn't have any pages mapped
       
   421 	s.SetUnused();
       
   422 
       
   423 	TInt id=aId &~ iPtClusterMask;
       
   424 	if (iPageTableAllocator)
       
   425 		{
       
   426 		iPageTableAllocator->Free(aId);
       
   427 		if (iPageTableAllocator->NotFree(id,iPtClusterSize))
       
   428 			{
       
   429 			// some subpages still in use
       
   430 			return ETrue;
       
   431 			}
       
   432 		__KTRACE_OPT(KMMU,Kern::Printf("Freeing whole page, id=%d",id));
       
   433 		// whole page is now free
       
   434 		// remove it from the page table allocator
       
   435 		iPageTableAllocator->Alloc(id,iPtClusterSize);
       
   436 		}
       
   437 
       
   438 	TInt ptb=aId>>iPtBlockShift;
       
   439 	if (--iPtBlockCount[ptb]==0)
       
   440 		{
       
   441 		// shrink page table info array
       
   442 		TLinAddr pil=PtInfoBlockLinAddr(ptb);
       
   443 		UnmapAndFree(pil,1);	// remove PTE, null page info, free page
       
   444 #ifdef BTRACE_KERNEL_MEMORY
       
   445 		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
       
   446 		--Epoc::KernelMiscPages;
       
   447 #endif
       
   448 		}
       
   449 
       
   450 	// free the page table linear address
       
   451 	iPageTableLinearAllocator->Free(id>>iPtClusterShift);
       
   452 	return EFalse;
       
   453 	}
       
   454 
       
   455 void MmuBase::FreePageTable(TInt aId)
       
   456 //
       
   457 // Free an empty page table. We assume that all pages mapped by the page table have
       
   458 // already been unmapped and freed.
       
   459 //
       
   460 	{
       
   461 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePageTable(%d)",aId));
       
   462 	if (DoFreePageTable(aId))
       
   463 		return;
       
   464 
       
   465 	TInt id=aId &~ iPtClusterMask;
       
   466 
       
   467 	// calculate linear address of page
       
   468 	TLinAddr ptLin=PageTableLinAddr(id);
       
   469 	__KTRACE_OPT(KMMU,Kern::Printf("Page lin %08x",ptLin));
       
   470 
       
   471 	// unmap and free the page
       
   472 	UnmapAndFree(ptLin,1);
       
   473 #ifdef BTRACE_KERNEL_MEMORY
       
   474 	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
       
   475 	--Epoc::KernelMiscPages;
       
   476 #endif
       
   477 
       
   478 	TInt ptg=aId>>iPtGroupShift;
       
   479 	--iPtGroupCount[ptg];
       
   480 	// don't shrink the page table mapping for now
       
   481 	}
       
   482 
       
   483 TInt MmuBase::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
       
   484 	{
       
   485 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() size=%x align=%d",aSize,aAlign));
       
   486 	TInt r=AllocContiguousRam(aSize, aPhysAddr, EPageFixed, aAlign);
       
   487 	if (r!=KErrNone)
       
   488 		{
       
   489 		iAllocFailed=ETrue;
       
   490 		return r;
       
   491 		}
       
   492 	TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
       
   493 	SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
       
   494 	SPageInfo* pE=pI+n;
       
   495 	for (; pI<pE; ++pI)
       
   496 		{
       
   497 		NKern::LockSystem();
       
   498 		__NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
       
   499 		pI->Lock();
       
   500 		NKern::UnlockSystem();
       
   501 		}
       
   502 	return KErrNone;
       
   503 	}
       
   504 
       
    505 /** Attempt to allocate a contiguous block of RAM from the specified RAM zones.
       
   506 
       
   507 @param aZoneIdList	An array of the IDs of the RAM zones to allocate from.
       
   508 @param aZoneIdCount	The number of RAM zone IDs listed in aZoneIdList.
       
   509 @param aSize 		The number of contiguous bytes to allocate
       
   510 @param aPhysAddr 	The physical address of the start of the contiguous block of 
       
   511 					memory allocated
       
   512 @param aAlign		Required alignment
       
    513 @return KErrNone on success; KErrArgument if a listed zone doesn't exist or aSize is larger than the
       
    514 size of the RAM zone; KErrNoMemory when the RAM zone is too full.
       
   515 */
       
   516 TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
       
   517 	{
       
   518 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() size=0x%x align=%d", aSize, aAlign));
       
   519 	TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, EPageFixed, aAlign);
       
   520 	if (r!=KErrNone)
       
   521 		{
       
   522 		iAllocFailed=ETrue;
       
   523 		return r;
       
   524 		}
       
   525 	TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
       
   526 	SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
       
   527 	SPageInfo* pE=pI+n;
       
   528 	for (; pI<pE; ++pI)
       
   529 		{
       
   530 		NKern::LockSystem();
       
   531 		__NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
       
   532 		pI->Lock();
       
   533 		NKern::UnlockSystem();
       
   534 		}
       
   535 	return KErrNone;
       
   536 	}
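// Illustrative use (hypothetical zone IDs and size, not from this file): reserve
// 64KB of physically contiguous RAM from two preferred zones with no extra alignment:
//
//   TUint zones[2] = { 5, 6 };
//   TPhysAddr base;
//   TInt r = ZoneAllocPhysicalRam(zones, 2, 0x10000, base, 0);
//   // On KErrNone, 'base' holds the physical start and every page in the block
//   // has been locked by the loop above.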
       
   537 
       
   538 
       
   539 /** Attempt to allocate discontiguous RAM pages.
       
   540 
       
   541 @param aNumPages	The number of pages to allocate.
       
   542 @param aPageList 	Pointer to an array where each element will be the physical 
       
   543 					address of each page allocated.
       
   544 @return KErrNone on success, KErrNoMemory otherwise
       
   545 */
       
   546 TInt MmuBase::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
       
   547 	{
       
   548 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() numpages=%x", aNumPages));
       
   549 	TInt r = AllocRamPages(aPageList, aNumPages, EPageFixed);
       
   550 	if (r!=KErrNone)
       
   551 		{
       
   552 		iAllocFailed=ETrue;
       
   553 		return r;
       
   554 		}
       
   555 	TPhysAddr* pageEnd = aPageList + aNumPages;
       
   556 	for (TPhysAddr* page = aPageList; page < pageEnd; page++)
       
   557 		{
       
   558 		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
       
   559 		NKern::LockSystem();
       
   560 		__NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
       
   561 		pageInfo->Lock();
       
   562 		NKern::UnlockSystem();
       
   563 		}
       
   564 	return KErrNone;
       
   565 	}
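// Illustrative use (a sketch; the array size is arbitrary): allocate eight
// discontiguous pages and later release them with the matching overload of
// FreePhysicalRam() further down this file:
//
//   TPhysAddr pages[8];
//   TInt r = AllocPhysicalRam(8, pages);
//   ...
//   if (r == KErrNone)
//       FreePhysicalRam(8, pages);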
       
   566 
       
   567 
       
   568 /** Attempt to allocate discontiguous RAM pages from the specified RAM zones.
       
   569 
       
   570 @param aZoneIdList	An array of the IDs of the RAM zones to allocate from.
       
   571 @param aZoneIdCount	The number of RAM zone IDs listed in aZoneIdList.
       
   572 @param aNumPages	The number of pages to allocate.
       
   573 @param aPageList 	Pointer to an array where each element will be the physical 
       
   574 					address of each page allocated.
       
    575 @return KErrNone on success; KErrArgument if a listed zone doesn't exist or aNumPages is
       
    576 larger than the total number of pages in the RAM zone; KErrNoMemory when the RAM
       
   577 zone is too full.
       
   578 */
       
   579 TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
       
   580 	{
       
   581 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() numpages 0x%x zones 0x%x", aNumPages, aZoneIdCount));
       
   582 	TInt r = ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
       
   583 	if (r!=KErrNone)
       
   584 		{
       
   585 		iAllocFailed=ETrue;
       
   586 		return r;
       
   587 		}
       
   588 
       
   589 	TPhysAddr* pageEnd = aPageList + aNumPages;
       
   590 	for (TPhysAddr* page = aPageList; page < pageEnd; page++)
       
   591 		{
       
   592 		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
       
   593 		NKern::LockSystem();
       
   594 		__NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
       
   595 		pageInfo->Lock();
       
   596 		NKern::UnlockSystem();
       
   597 		}
       
   598 	return KErrNone;
       
   599 	}
       
   600 
       
   601 
       
   602 TInt MmuBase::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
       
   603 	{
       
   604 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%x)",aPhysAddr,aSize));
       
   605 
       
   606 	TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
       
   607 	SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
       
   608 	SPageInfo* pE=pI+n;
       
   609 	for (; pI<pE; ++pI)
       
   610 		{
       
   611 		NKern::LockSystem();
       
   612 		__ASSERT_ALWAYS(pI->Type()==SPageInfo::EUnused && pI->Unlock()==0, Panic(EBadFreePhysicalRam));
       
   613 		NKern::UnlockSystem();
       
   614 		}
       
   615 	TInt r=iRamPageAllocator->FreePhysicalRam(aPhysAddr, aSize);
       
   616 	return r;
       
   617 	}
       
   618 
       
    619 /** Free discontiguous RAM pages that were previously allocated using the discontiguous
       
   620 overload of MmuBase::AllocPhysicalRam() or MmuBase::ZoneAllocPhysicalRam().
       
   621 
       
   622 Specifying one of the following may cause the system to panic: 
       
   623 a) an invalid physical RAM address.
       
   624 b) valid physical RAM addresses where some had not been previously allocated.
       
    625 c) an address not aligned to a page boundary.
       
   626 
       
   627 @param aNumPages	Number of pages to free
       
   628 @param aPageList	Array of the physical address of each page to free
       
   629 
       
   630 @return KErrNone if the operation was successful.
       
   631 		
       
   632 */
       
   633 TInt MmuBase::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
       
   634 	{
       
   635 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%08x)", aNumPages, aPageList));
       
   636 	
       
   637 	TPhysAddr* pageEnd = aPageList + aNumPages;
       
   638 	TInt r = KErrNone;
       
   639 
       
   640 	for (TPhysAddr* page = aPageList; page < pageEnd && r == KErrNone; page++)
       
   641 		{
       
   642 		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
       
   643 		NKern::LockSystem();
       
   644 		__ASSERT_ALWAYS(pageInfo->Type()==SPageInfo::EUnused && pageInfo->Unlock()==0, Panic(EBadFreePhysicalRam));
       
   645 		NKern::UnlockSystem();
       
   646 		
       
   647 		// Free the page
       
   648 		r = iRamPageAllocator->FreePhysicalRam(*page, KPageSize);
       
   649 		}
       
   650 	return r;
       
   651 	}
       
   652 
       
   653 
       
   654 TInt MmuBase::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
       
   655 	{
       
   656 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(%08x,%x)",aPhysAddr,aSize));
       
   657 	TUint32 pa=aPhysAddr;
       
   658 	TUint32 size=aSize;
       
   659 	TInt n=RoundUpRangeToPageSize(pa,size);
       
   660 	TInt r=iRamPageAllocator->ClaimPhysicalRam(pa, size);
       
   661 	if (r==KErrNone)
       
   662 		{
       
   663 		SPageInfo* pI=SPageInfo::FromPhysAddr(pa);
       
   664 		SPageInfo* pE=pI+n;
       
   665 		for (; pI<pE; ++pI)
       
   666 			{
       
   667 			NKern::LockSystem();
       
   668 			__NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused && pI->LockCount()==0);
       
   669 			pI->Lock();
       
   670 			NKern::UnlockSystem();
       
   671 			}
       
   672 		}
       
   673 	return r;
       
   674 	}
       
   675 
       
   676 /** 
       
   677 Allocate a set of discontiguous RAM pages from the specified zone.
       
   678 
       
   679 @param aZoneIdList	The array of IDs of the RAM zones to allocate from.
       
   680 @param aZoneIdCount	The number of RAM zone IDs in aZoneIdList.
       
   681 @param aPageList 	Preallocated array of TPhysAddr elements that will receive the
       
   682 physical address of each page allocated.
       
   683 @param aNumPages 	The number of pages to allocate.
       
   684 @param aPageType 	The type of the pages being allocated.
       
   685 
       
   686 @return KErrNone on success, KErrArgument if a zone of aZoneIdList doesn't exist, 
       
   687 KErrNoMemory if there aren't enough free pages in the zone
       
   688 */
       
   689 TInt MmuBase::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType)
       
   690 	{
       
   691 #ifdef _DEBUG
       
   692 	if(K::CheckForSimulatedAllocFail())
       
   693 		return KErrNoMemory;
       
   694 #endif
       
   695 	__NK_ASSERT_DEBUG(aPageType == EPageFixed);
       
   696 
       
   697 	return iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, aPageType);
       
   698 	}
       
   699 
       
   700 
       
   701 TInt MmuBase::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId, TBool aBlockRest)
       
   702 	{
       
   703 #ifdef _DEBUG
       
   704 	if(K::CheckForSimulatedAllocFail())
       
   705 		return KErrNoMemory;
       
   706 #endif
       
   707 	TInt missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);
       
   708 
       
   709 	// If missing some pages, ask the RAM cache to donate some of its pages.
       
    710 	// Don't ask it when allocating discardable pages, as those are intended for the cache itself.
       
   711 	if(missing && aPageType != EPageDiscard && iRamCache->GetFreePages(missing))
       
   712 		missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);
       
   713 	return missing ? KErrNoMemory : KErrNone;
       
   714 	}
       
   715 
       
   716 
       
   717 TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
       
   718 	{
       
   719 #ifdef _DEBUG
       
   720 	if(K::CheckForSimulatedAllocFail())
       
   721 		return KErrNoMemory;
       
   722 #endif
       
   723 	__NK_ASSERT_DEBUG(aPageType == EPageFixed);
       
   724 	TUint contigPages = (aSize + KPageSize - 1) >> KPageShift;
       
   725 	TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
       
   726 	if (r == KErrNoMemory && contigPages > KMaxFreeableContiguousPages)
       
    727 		{// Allocation failed; as this is a large allocation, flush the RAM cache
       
    728 		// and reattempt it, since allocations this large don't discard cache pages themselves.
       
   729 		iRamCache->FlushAll();
       
   730 		r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
       
   731 		}
       
   732 	return r;
       
   733 	}
       
   734 
       
   735 
       
   736 /**
       
   737 Allocate contiguous RAM from the specified RAM zones.
       
   738 @param aZoneIdList	An array of IDs of the RAM zones to allocate from
       
   739 @param aZoneIdCount	The number of IDs listed in aZoneIdList
       
   740 @param aSize		The number of bytes to allocate
       
   741 @param aPhysAddr 	Will receive the physical base address of the allocated RAM
       
   742 @param aPageType 	The type of the pages being allocated
       
    743 @param aAlign 		The log base 2 alignment required
       
   744 */
       
   745 TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign)
       
   746 	{
       
   747 #ifdef _DEBUG
       
   748 	if(K::CheckForSimulatedAllocFail())
       
   749 		return KErrNoMemory;
       
   750 #endif
       
   751 	return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aPageType, aAlign);
       
   752 	}
       
   753 
       
   754 SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
       
   755 	{
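	// Each page of the SPageInfo array describes 2^(KPageShift-KPageInfoShift)
	// physical pages, so 'index' selects the array page that would hold the
	// SPageInfo for aAddress; KPageInfoMap has one bit per array page saying
	// whether that part of the array is actually present.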
       
   756 	TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
       
   757 	TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
       
   758 	TUint mask = 1<<(index&7);
       
   759 	if(!(flags&mask))
       
   760 		return 0; // no SPageInfo for aAddress
       
   761 	SPageInfo* info = FromPhysAddr(aAddress);
       
   762 	if(info->Type()==SPageInfo::EInvalid)
       
   763 		return 0;
       
   764 	return info;
       
   765 	}
       
   766 
       
   767 /** HAL Function wrapper for the RAM allocator.
       
   768  */
       
   769 
       
   770 TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
       
   771 	{
       
   772 	DRamAllocator *pRamAlloc = MmuBase::TheMmu->iRamPageAllocator;
       
   773 	
       
   774 	if (pRamAlloc)
       
   775 		return pRamAlloc->HalFunction(aFunction, a1, a2);
       
   776 	return KErrNotSupported;
       
   777 	}
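// This wrapper is registered in MmuBase::Init3() below via
// Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0).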
       
   778 
       
   779 
       
   780 /******************************************************************************
       
   781  * Initialisation
       
   782  ******************************************************************************/
       
   783 
       
   784 void MmuBase::Init1()
       
   785 	{
       
   786 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init1"));
       
   787 	iInitialFreeMemory=0;
       
   788 	iAllocFailed=EFalse;
       
   789 	}
       
   790 
       
   791 void MmuBase::Init2()
       
   792 	{
       
   793 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init2"));
       
   794 	TInt total_ram=TheSuperPage().iTotalRamSize;
       
   795 	TInt total_ram_pages=total_ram>>iPageShift;
       
   796 	iNumPages = total_ram_pages;
       
   797 	const SRamInfo& info=*(const SRamInfo*)TheSuperPage().iRamBootData;
       
   798 	iRamPageAllocator=DRamAllocator::New(info, RamZoneConfig, RamZoneCallback);
       
   799 
       
   800 	TInt max_pt=total_ram>>iPageTableShift;
       
   801 	if (max_pt<iMaxPageTables)
       
   802 		iMaxPageTables=max_pt;
       
   803 	iMaxPageTables &= ~iPtClusterMask;
       
   804 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iMaxPageTables=%d",iMaxPageTables));
       
   805 	TInt max_ptpg=iMaxPageTables>>iPtClusterShift;
       
   806 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptpg=%d",max_ptpg));
       
   807 	iPageTableLinearAllocator=TBitMapAllocator::New(max_ptpg,ETrue);
       
   808 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableLinearAllocator=%08x",iPageTableLinearAllocator));
       
   809 	__ASSERT_ALWAYS(iPageTableLinearAllocator,Panic(EPtLinAllocCreateFailed));
       
   810 	if (iPtClusterShift)	// if more than one page table per page
       
   811 		{
       
   812 		iPageTableAllocator=TBitMapAllocator::New(iMaxPageTables,EFalse);
       
   813 		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableAllocator=%08x",iPageTableAllocator));
       
   814 		__ASSERT_ALWAYS(iPageTableAllocator,Panic(EPtAllocCreateFailed));
       
   815 		}
       
   816 	TInt max_ptb=(iMaxPageTables+iPtBlockMask)>>iPtBlockShift;
       
   817 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptb=%d",max_ptb));
       
   818 	iPtBlockCount=(TInt*)Kern::AllocZ(max_ptb*sizeof(TInt));
       
   819 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtBlockCount=%08x",iPtBlockCount));
       
   820 	__ASSERT_ALWAYS(iPtBlockCount,Panic(EPtBlockCountCreateFailed));
       
   821 	TInt max_ptg=(iMaxPageTables+iPtGroupMask)>>iPtGroupShift;
       
   822 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ptg_shift=%d, max_ptg=%d",iPtGroupShift,max_ptg));
       
   823 	iPtGroupCount=(TInt*)Kern::AllocZ(max_ptg*sizeof(TInt));
       
   824 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtGroupCount=%08x",iPtGroupCount));
       
   825 	__ASSERT_ALWAYS(iPtGroupCount,Panic(EPtGroupCountCreateFailed));
       
   826 
       
   827 
       
    828 	// Clear the initial (and only so far) page table info page so all unused
       
   829 	// page tables will be marked as unused.
       
   830 	memclr((TAny*)KPageTableInfoBase, KPageSize);
       
   831 
       
   832 	// look for page tables - assume first page table (id=0) maps page tables
       
   833 	TPte* pPte=(TPte*)iPageTableLinBase;
       
   834 	TInt i;
       
   835 	for (i=0; i<iChunkSize/iPageSize; ++i)
       
   836 		{
       
   837 		TPte pte=*pPte++;
       
   838 		if (!PteIsPresent(pte))	// after boot, page tables are contiguous
       
   839 			break;
       
   840 		iPageTableLinearAllocator->Alloc(i,1);
       
   841 		TPhysAddr ptpgPhys=PtePhysAddr(pte, i);
       
   842 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
       
   843 		__ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
       
   844 		pi->SetPageTable(i);
       
   845 		pi->Lock();
       
   846 		TInt id=i<<iPtClusterShift;
       
   847 		TInt ptb=id>>iPtBlockShift;
       
   848 		++iPtBlockCount[ptb];
       
   849 		TInt ptg=id>>iPtGroupShift;
       
   850 		++iPtGroupCount[ptg];
       
   851 		}
       
   852 
       
   853 	// look for mapped pages
       
   854 	TInt npdes=1<<(32-iChunkShift);
       
   855 	TInt npt=0;
       
   856 	for (i=0; i<npdes; ++i)
       
   857 		{
       
   858 		TLinAddr cAddr=TLinAddr(i<<iChunkShift);
       
   859 		if (cAddr>=PP::RamDriveStartAddress && TUint32(cAddr-PP::RamDriveStartAddress)<TUint32(PP::RamDriveRange))
       
   860 			continue;	// leave RAM drive for now
       
   861 		TInt ptid=PageTableId(cAddr);
       
   862 		TPhysAddr pdePhys = PdePhysAddr(cAddr);	// check for whole PDE mapping
       
   863 		pPte = NULL;
       
   864 		if (ptid>=0)
       
   865 			{
       
   866 			++npt;
       
   867 			__KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> page table %d", cAddr, ptid));
       
   868 			pPte=(TPte*)PageTableLinAddr(ptid);
       
   869 			}
       
   870 #ifdef KMMU
       
   871 		if (pdePhys != KPhysAddrInvalid)
       
   872 			{
       
   873 			__KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", cAddr, pdePhys));
       
   874 			}
       
   875 #endif
       
   876 		if (ptid>=0 || pdePhys != KPhysAddrInvalid)
       
   877 			{
       
   878 			TInt j;
       
   879 			TInt np=0;
       
   880 			for (j=0; j<iChunkSize/iPageSize; ++j)
       
   881 				{
       
   882 				TBool present = ETrue;	// all pages present if whole PDE mapping
       
   883 				TPte pte = 0;
       
   884 				if (pPte)
       
   885 					{
       
   886 					pte = pPte[j];
       
   887 					present = PteIsPresent(pte);
       
   888 					}
       
   889 				if (present)
       
   890 					{
       
   891 					++np;
       
   892 					TPhysAddr pa = pPte ? PtePhysAddr(pte, j) : (pdePhys + (j<<iPageShift));
       
   893 					SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
       
   894 					__KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x PA=%08x",
       
   895 														cAddr+(j<<iPageShift), pa));
       
   896 					if (pi)	// ignore non-RAM mappings
       
   897 						{//these pages will never be freed and can't be moved
       
   898 						TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
       
   899 						// allow KErrAlreadyExists since it's possible that a page is doubly mapped
       
   900 						__ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
       
   901 						SetupInitialPageInfo(pi,cAddr,j);
       
   902 #ifdef BTRACE_KERNEL_MEMORY
       
   903 						if(r==KErrNone)
       
   904 							++Epoc::KernelMiscPages;
       
   905 #endif
       
   906 						}
       
   907 					}
       
   908 				}
       
   909 			__KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x #PTEs=%d",cAddr,np));
       
   910 			if (ptid>=0)
       
   911 				SetupInitialPageTableInfo(ptid,cAddr,np);
       
   912 			}
       
   913 		}
       
   914 
       
   915 	TInt oddpt=npt & iPtClusterMask;
       
   916 	if (oddpt)
       
   917 		oddpt=iPtClusterSize-oddpt;
       
   918 	__KTRACE_OPT(KBOOT,Kern::Printf("Total page tables %d, left over subpages %d",npt,oddpt));
       
   919 	if (oddpt)
       
   920 		iPageTableAllocator->Free(npt,oddpt);
       
   921 
       
   922 	DoInit2();
       
   923 
       
   924 	// Save current free RAM size - there can never be more free RAM than this
       
   925 	TInt max_free = Kern::FreeRamInBytes();
       
   926 	K::MaxFreeRam = max_free;
       
   927 	if (max_free < PP::RamDriveMaxSize)
       
   928 		PP::RamDriveMaxSize = max_free;
       
   929 
       
   930 	if (K::ColdStart)
       
   931 		ClearRamDrive(PP::RamDriveStartAddress);
       
   932 	else
       
   933 		RecoverRamDrive();
       
   934 
       
   935 	TInt r=K::MutexCreate((DMutex*&)RamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
       
   936 	if (r!=KErrNone)
       
   937 		Panic(ERamAllocMutexCreateFailed);
       
   938 	r=K::MutexCreate((DMutex*&)HwChunkMutex, KLitHwChunk, NULL, EFalse, KMutexOrdHwChunk);
       
   939 	if (r!=KErrNone)
       
   940 		Panic(EHwChunkMutexCreateFailed);
       
   941 	
       
   942 #ifdef __DEMAND_PAGING__
       
   943 	if (DemandPaging::RomPagingRequested() || DemandPaging::CodePagingRequested())
       
   944 		iRamCache = DemandPaging::New();
       
   945 	else
       
   946 		iRamCache = new RamCache;
       
   947 #else
       
   948 	iRamCache = new RamCache;
       
   949 #endif
       
   950 	if (!iRamCache)
       
   951 		Panic(ERamCacheAllocFailed);
       
   952 	iRamCache->Init2();
       
   953 	RamCacheBase::TheRamCache = iRamCache;
       
   954 
       
   955 	// Get the allocator to signal to the variant which RAM zones are in use so far
       
   956 	iRamPageAllocator->InitialCallback();
       
   957 	}
       
   958 
       
   959 void MmuBase::Init3()
       
   960 	{
       
   961 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init3"));
       
   962 
       
   963 	// Initialise demand paging
       
   964 #ifdef __DEMAND_PAGING__
       
   965 	M::DemandPagingInit();
       
   966 #endif
       
   967 
       
   968 	// Register a HAL Function for the Ram allocator.
       
   969 	TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0);
       
   970 	__NK_ASSERT_ALWAYS(r==KErrNone);
       
   971 
       
   972 	//
       
    973 	// Perform the initialisation for page moving and the RAM defrag object.
       
   974 	//
       
   975 
       
   976 	// allocate a page to use as an alt stack
       
   977 	MmuBase::Wait();
       
   978 	TPhysAddr stackpage;
       
   979 	r = AllocPhysicalRam(KPageSize, stackpage);
       
   980 	MmuBase::Signal();
       
   981 	if (r!=KErrNone)
       
   982 		Panic(EDefragStackAllocFailed);
       
   983 
       
   984 	// map it at a predetermined address
       
   985 	TInt ptid = PageTableId(KDefragAltStackAddr);
       
   986 	TPte perm = PtePermissions(EKernelStack);
       
   987 	NKern::LockSystem();
       
   988 	MapRamPages(ptid, SPageInfo::EFixed, NULL, KDefragAltStackAddr, &stackpage, 1, perm);
       
   989 	NKern::UnlockSystem();
       
   990 	iAltStackBase = KDefragAltStackAddr + KPageSize;
       
   991 
       
   992 	__KTRACE_OPT(KMMU,Kern::Printf("Allocated defrag alt stack page at %08x, mapped to %08x, base is now %08x", stackpage, KDefragAltStackAddr, iAltStackBase));
       
   993 
       
   994 	// Create the actual defrag object and initialise it.
       
   995 	iDefrag = new Defrag;
       
   996 	if (!iDefrag)
       
   997 		Panic(EDefragAllocFailed);
       
   998 	iDefrag->Init3(iRamPageAllocator);
       
   999 	}
       
  1000 
       
  1001 void MmuBase::CreateKernelSection(TLinAddr aEnd, TInt aHwChunkAlign)
       
  1002 	{
       
  1003 	TLinAddr base=(TLinAddr)TheRomHeader().iKernelLimit;
       
  1004 	iKernelSection=TLinearSection::New(base, aEnd);
       
  1005 	__ASSERT_ALWAYS(iKernelSection!=NULL, Panic(ECreateKernelSectionFailed));
       
  1006 	iHwChunkAllocator=THwChunkAddressAllocator::New(aHwChunkAlign, iKernelSection);
       
  1007 	__ASSERT_ALWAYS(iHwChunkAllocator!=NULL, Panic(ECreateHwChunkAllocFailed));
       
  1008 	}
       
  1009 
       
  1010 // Recover RAM drive contents after a reset
       
  1011 TInt MmuBase::RecoverRamDrive()
       
  1012 	{
       
  1013 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::RecoverRamDrive()"));
       
  1014 	TLinAddr ptlin;
       
  1015 	TLinAddr chunk = PP::RamDriveStartAddress;
       
  1016 	TLinAddr end = chunk + (TLinAddr)PP::RamDriveRange;
       
  1017 	TInt size = 0;
       
  1018 	TInt limit = RoundToPageSize(TheSuperPage().iRamDriveSize);
       
  1019 	for( ; chunk<end; chunk+=iChunkSize)
       
  1020 		{
       
  1021 		if (size==limit)		// have reached end of ram drive
       
  1022 			break;
       
  1023 		TPhysAddr ptphys = 0;
       
  1024 		TInt ptid = BootPageTableId(chunk, ptphys);	// ret KErrNotFound if PDE not present, KErrUnknown if present but as yet unknown page table
       
  1025 		__KTRACE_OPT(KMMU,Kern::Printf("Addr %08x: PTID=%d PTPHYS=%08x", chunk, ptid, ptphys));
       
  1026 		if (ptid==KErrNotFound)
       
  1027 			break;		// no page table so stop here and clear to end of range
       
  1028 		TPhysAddr ptpgphys = ptphys & ~iPageMask;
       
  1029 		TInt r = iRamPageAllocator->MarkPageAllocated(ptpgphys, EPageMovable);
       
  1030 		__KTRACE_OPT(KMMU,Kern::Printf("MPA: r=%d",r));
       
  1031 		if (r==KErrArgument)
       
  1032 			break;		// page table address was invalid - stop here and clear to end of range
       
  1033 		if (r==KErrNone)
       
  1034 			{
       
  1035 			// this page was currently unallocated
       
  1036 			if (ptid>=0)
       
  1037 				break;	// ID has been allocated - bad news - bail here
       
  1038 			ptid = iPageTableLinearAllocator->Alloc();
       
  1039 			__ASSERT_ALWAYS(ptid>=0, Panic(ERecoverRamDriveAllocPTIDFailed));
       
  1040 			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgphys);
       
  1041 			__ASSERT_ALWAYS(pi, Panic(ERecoverRamDriveBadPageTable));
       
  1042 			pi->SetPageTable(ptid);	// id = cluster number here
       
  1043 			ptid <<= iPtClusterShift;
       
  1044 			MapPageTable(ptid, ptpgphys, EFalse);
       
  1045 			if (iPageTableAllocator)
       
  1046 				iPageTableAllocator->Free(ptid, iPtClusterSize);
       
  1047 			ptid |= ((ptphys>>iPageTableShift)&iPtClusterMask);
       
  1048 			ptlin = PageTableLinAddr(ptid);
       
  1049 			__KTRACE_OPT(KMMU,Kern::Printf("Page table ID %d lin %08x", ptid, ptlin));
       
  1050 			if (iPageTableAllocator)
       
  1051 				iPageTableAllocator->Alloc(ptid, 1);
       
  1052 			}
       
  1053 		else
       
  1054 			{
       
  1055 			// this page was already allocated
       
  1056 			if (ptid<0)
       
  1057 				break;	// ID not allocated - bad news - bail here
       
  1058 			ptlin = PageTableLinAddr(ptid);
       
  1059 			__KTRACE_OPT(KMMU,Kern::Printf("Page table lin %08x", ptlin));
       
  1060 			if (iPageTableAllocator)
       
  1061 				iPageTableAllocator->Alloc(ptid, 1);
       
  1062 			}
       
  1063 		TInt pte_index;
       
   1064 		TInt chunk_inc = 0;
       
  1065 		TPte* page_table = (TPte*)ptlin;
       
   1066 		for (pte_index=0; pte_index<(iChunkSize>>iPageShift); ++pte_index)
       
  1067 			{
       
  1068 			if (size==limit)		// have reached end of ram drive
       
  1069 				break;
       
  1070 			TPte pte = page_table[pte_index];
       
  1071 			if (PteIsPresent(pte))
       
  1072 				{
       
  1073 				TPhysAddr pa=PtePhysAddr(pte, pte_index);
       
  1074 				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
       
  1075 				if (!pi)
       
  1076 					break;
       
  1077 				TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageMovable);
       
  1078 				__ASSERT_ALWAYS(r==KErrNone, Panic(ERecoverRamDriveBadPage));
       
  1079 				size+=iPageSize;
       
  1080 				chunk_inc = iChunkSize;
       
  1081 				}
       
  1082 			}
       
   1083 		if (pte_index < (iChunkSize>>iPageShift) )
       
  1084 			{
       
  1085 			// if we recovered pages in this page table, leave it in place
       
  1086 			chunk += chunk_inc;
       
  1087 
       
  1088 			// clear from here on
       
  1089 			ClearPageTable(ptid, pte_index);
       
  1090 			break;
       
  1091 			}
       
  1092 		}
       
  1093 	if (chunk < end)
       
  1094 		ClearRamDrive(chunk);
       
  1095 	__KTRACE_OPT(KMMU,Kern::Printf("Recovered RAM drive size %08x",size));
       
  1096 	if (size<TheSuperPage().iRamDriveSize)
       
  1097 		{
       
  1098 		__KTRACE_OPT(KMMU,Kern::Printf("Truncating RAM drive from %08x to %08x", TheSuperPage().iRamDriveSize, size));
       
  1099 		TheSuperPage().iRamDriveSize=size;
       
  1100 		}
       
  1101 	return KErrNone;
       
  1102 	}
       
  1103 
       
  1104 TInt MmuBase::AllocShadowPage(TLinAddr aRomAddr)
       
  1105 	{
       
  1106 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase:AllocShadowPage(%08x)", aRomAddr));
       
  1107 	aRomAddr &= ~iPageMask;
       
  1108 	TPhysAddr orig_phys = KPhysAddrInvalid;
       
  1109 	if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
       
  1110 		orig_phys = LinearToPhysical(aRomAddr);
       
  1111 	__KTRACE_OPT(KMMU,Kern::Printf("OrigPhys = %08x",orig_phys));
       
  1112 	if (orig_phys == KPhysAddrInvalid)
       
  1113 		{
       
  1114 		__KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
       
  1115 		return KErrArgument;
       
  1116 		}
       
  1117 	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(orig_phys);
       
  1118 	if (pi && pi->Type()==SPageInfo::EShadow)
       
  1119 		{
       
  1120 		__KTRACE_OPT(KMMU,Kern::Printf("ROM address already shadowed"));
       
  1121 		return KErrAlreadyExists;
       
  1122 		}
       
  1123 	TInt ptid = PageTableId(aRomAddr);
       
  1124 	__KTRACE_OPT(KMMU, Kern::Printf("Shadow PTID %d", ptid));
       
  1125 	TInt newptid = -1;
       
  1126 	if (ptid<0)
       
  1127 		{
       
  1128 		newptid = AllocPageTable();
       
  1129 		__KTRACE_OPT(KMMU, Kern::Printf("New shadow PTID %d", newptid));
       
  1130 		if (newptid<0)
       
  1131 			return KErrNoMemory;
       
  1132 		ptid = newptid;
       
  1133 		PtInfo(ptid).SetShadow( (aRomAddr-iRomLinearBase)>>iChunkShift );
       
  1134 		InitShadowPageTable(ptid, aRomAddr, orig_phys);
       
  1135 		}
       
  1136 	TPhysAddr shadow_phys;
       
  1137 
       
  1138 	if (AllocRamPages(&shadow_phys, 1, EPageFixed) != KErrNone)
       
  1139 		{
       
  1140 		__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
       
  1141 		iAllocFailed=ETrue;
       
  1142 		if (newptid>=0)
       
  1143 			{
       
  1144 			FreePageTable(newptid);
       
  1145 			}
       
  1146 		return KErrNoMemory;
       
  1147 		}
       
  1148 #ifdef BTRACE_KERNEL_MEMORY
       
  1149 	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
       
  1150 	++Epoc::KernelMiscPages;
       
  1151 #endif
       
  1152 	InitShadowPage(shadow_phys, aRomAddr);	// copy original ROM contents
       
  1153 	NKern::LockSystem();
       
  1154 	Pagify(ptid, aRomAddr);
       
  1155 	MapRamPages(ptid, SPageInfo::EShadow, (TAny*)orig_phys, (aRomAddr-iRomLinearBase), &shadow_phys, 1, iShadowPtePerm);
       
  1156 	NKern::UnlockSystem();
       
  1157 	if (newptid>=0)
       
  1158 		{
       
  1159 		NKern::LockSystem();
       
  1160 		AssignShadowPageTable(newptid, aRomAddr);
       
  1161 		NKern::UnlockSystem();
       
  1162 		}
       
  1163 	FlushShadow(aRomAddr);
       
  1164 	__KTRACE_OPT(KMMU,Kern::Printf("AllocShadowPage successful"));
       
  1165 	return KErrNone;
       
  1166 	}
       
  1167 
       
  1168 TInt MmuBase::FreeShadowPage(TLinAddr aRomAddr)
       
  1169 	{
       
  1170 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreeShadowPage(%08x)", aRomAddr));
       
  1171 	aRomAddr &= ~iPageMask;
       
  1172 	TPhysAddr shadow_phys = KPhysAddrInvalid;
       
   1173 	if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
       
  1174 		shadow_phys = LinearToPhysical(aRomAddr);
       
  1175 	__KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
       
  1176 	if (shadow_phys == KPhysAddrInvalid)
       
  1177 		{
       
  1178 		__KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
       
  1179 		return KErrArgument;
       
  1180 		}
       
  1181 	TInt ptid = PageTableId(aRomAddr);
       
  1182 	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
       
  1183 	if (ptid<0 || !pi || pi->Type()!=SPageInfo::EShadow)
       
  1184 		{
       
  1185 		__KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
       
  1186 		return KErrGeneral;
       
  1187 		}
       
  1188 	TPhysAddr orig_phys = (TPhysAddr)pi->Owner();
       
  1189 	DoUnmapShadowPage(ptid, aRomAddr, orig_phys);
       
  1190 	SPageTableInfo& pti = PtInfo(ptid);
       
  1191 	if (pti.Attribs()==SPageTableInfo::EShadow && --pti.iCount==0)
       
  1192 		{
       
  1193 		TInt r = UnassignShadowPageTable(aRomAddr, orig_phys);
       
  1194 		if (r==KErrNone)
       
  1195 			FreePageTable(ptid);
       
  1196 		else
       
  1197 			pti.SetGlobal(aRomAddr>>iChunkShift);
       
  1198 		}
       
  1199 
       
  1200 	FreePages(&shadow_phys, 1, EPageFixed);
       
  1201 	__KTRACE_OPT(KMMU,Kern::Printf("FreeShadowPage successful"));
       
  1202 #ifdef BTRACE_KERNEL_MEMORY
       
  1203 	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
       
  1204 	--Epoc::KernelMiscPages;
       
  1205 #endif
       
  1206 	return KErrNone;
       
  1207 	}
       
  1208 
       
  1209 TInt MmuBase::FreezeShadowPage(TLinAddr aRomAddr)
       
  1210 	{
       
  1211 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreezeShadowPage(%08x)", aRomAddr));
       
  1212 	aRomAddr &= ~iPageMask;
       
  1213 	TPhysAddr shadow_phys = KPhysAddrInvalid;
       
   1214 	if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
       
  1215 		shadow_phys = LinearToPhysical(aRomAddr);
       
  1216 	__KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
       
  1217 	if (shadow_phys == KPhysAddrInvalid)
       
  1218 		{
       
  1219 		__KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
       
  1220 		return KErrArgument;
       
  1221 		}
       
  1222 	TInt ptid = PageTableId(aRomAddr);
       
  1223 	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
       
  1224 	if (ptid<0 || pi==0)
       
  1225 		{
       
  1226 		__KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
       
  1227 		return KErrGeneral;
       
  1228 		}
       
  1229 	DoFreezeShadowPage(ptid, aRomAddr);
       
  1230 	__KTRACE_OPT(KMMU,Kern::Printf("FreezeShadowPage successful"));
       
  1231 	return KErrNone;
       
  1232 	}
       
  1233 
       
  1234 TInt MmuBase::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
       
  1235 	{
       
  1236 	memcpy ((TAny*)aDest, (const TAny*)aSrc, aLength);
       
  1237 	return KErrNone;
       
  1238 	}
       
  1239 
       
  1240 void M::BTracePrime(TUint aCategory)
       
  1241 	{
       
  1242 	(void)aCategory;
       
  1243 
       
  1244 #ifdef BTRACE_KERNEL_MEMORY
       
  1245 	// Must check for -1 as that is the default value of aCategory for
       
  1246 	// BTrace::Prime() which is intended to prime all categories that are 
       
  1247 	// currently enabled via a single invocation of BTrace::Prime().
       
  1248 	if(aCategory==BTrace::EKernelMemory || (TInt)aCategory == -1)
       
  1249 		{
       
  1250 		NKern::ThreadEnterCS();
       
  1251 		Mmu::Wait();
       
  1252 		BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryInitialFree,TheSuperPage().iTotalRamSize);
       
  1253 		BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryCurrentFree,Kern::FreeRamInBytes());
       
  1254 		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, Epoc::KernelMiscPages<<KPageShift);
       
  1255 		#ifdef __DEMAND_PAGING__
       
  1256 		if (DemandPaging::ThePager) 
       
  1257 			BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,DemandPaging::ThePager->iMinimumPageCount << KPageShift);
       
  1258 		#endif
       
  1259 		BTrace8(BTrace::EKernelMemory,BTrace::EKernelMemoryDrvPhysAlloc, Epoc::DriverAllocdPhysRam, -1);
       
  1260 		Mmu::Signal();
       
  1261 		NKern::ThreadLeaveCS();
       
  1262 		}
       
  1263 #endif
       
  1264 
       
  1265 #ifdef BTRACE_RAM_ALLOCATOR
       
   1266 	// Must check for -1 as that is the default value of aCategory for
       
  1267 	// BTrace::Prime() which is intended to prime all categories that are 
       
  1268 	// currently enabled via a single invocation of BTrace::Prime().
       
  1269 	if(aCategory==BTrace::ERamAllocator || (TInt)aCategory == -1)
       
  1270 		{
       
  1271 		NKern::ThreadEnterCS();
       
  1272 		Mmu::Wait();
       
  1273 		Mmu::Get().iRamPageAllocator->SendInitialBtraceLogs();
       
  1274 		Mmu::Signal();
       
  1275 		NKern::ThreadLeaveCS();
       
  1276 		}
       
  1277 #endif
       
  1278 	}
       
  1279 
       
  1280 
       
  1281 /******************************************************************************
       
  1282  * Code common to all virtual memory models
       
  1283  ******************************************************************************/
       
  1284 
       
  1285 void RHeapK::Mutate(TInt aOffset, TInt aMaxLength)
       
  1286 //
       
  1287 // Used by the kernel to mutate a fixed heap into a chunk heap.
       
  1288 //
       
  1289 	{
       
  1290 	iMinLength += aOffset;
       
  1291 	iMaxLength = aMaxLength + aOffset;
       
  1292 	iOffset = aOffset;
       
  1293 	iChunkHandle = (TInt)K::HeapInfo.iChunk;
       
  1294 	iPageSize = M::PageSizeInBytes();
       
  1295 	iGrowBy = iPageSize;
       
  1296 	iFlags = 0;
       
  1297 	}
       
  1298 
       
  1299 TInt M::PageSizeInBytes()
       
  1300 	{
       
  1301 	return KPageSize;
       
  1302 	}
       
  1303 
       
  1304 TInt MmuBase::FreeRamInBytes()
       
  1305 	{
       
  1306 	TInt free = iRamPageAllocator->FreeRamInBytes();
       
  1307 	if(iRamCache)
       
  1308 		free += iRamCache->NumberOfFreePages()<<iPageShift;
       
  1309 	return free;
       
  1310 	}
       
  1311 
       
  1312 /**	Returns the amount of free RAM currently available.
       
  1313 
       
  1314 @return The number of bytes of free RAM currently available.
       
  1315 @pre	any context
       
  1316  */
       
  1317 EXPORT_C TInt Kern::FreeRamInBytes()
       
  1318 	{
       
  1319 	return MmuBase::TheMmu->FreeRamInBytes();
       
  1320 	}
       
  1321 
       
  1322 
       
   1323 /**	Rounds up the argument to the size of an MMU page.
       
  1324 
       
  1325 	To find out the size of a MMU page:
       
  1326 	@code
       
  1327 	size = Kern::RoundToPageSize(1);
       
  1328 	@endcode
       
  1329 
       
  1330 	@param aSize Value to round up
       
  1331 	@pre any context
       
  1332  */
       
  1333 EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
       
  1334 	{
       
  1335 	return MmuBase::RoundToPageSize(aSize);
       
  1336 	}
       
  1337 
       
  1338 
       
   1339 /**	Rounds up the argument to the amount of memory mapped by an MMU page 
       
  1340 	directory entry.
       
  1341 
       
  1342 	Chunks occupy one or more consecutive page directory entries (PDE) and
       
  1343 	therefore the amount of linear and physical memory allocated to a chunk is
       
  1344 	always a multiple of the amount of memory mapped by a page directory entry.
       
  1345  */
       
  1346 EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
       
  1347 	{
       
  1348 	return MmuBase::RoundToChunkSize(aSize);
       
  1349 	}
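// Illustrative sketch (editor's addition): on a typical ARM configuration where
// one page directory entry maps 1MB, Kern::RoundToChunkSize() rounds up to the
// next megabyte boundary, e.g.
//
//   TUint32 chunkBytes = Kern::RoundToChunkSize(0x120000);  // -> 0x200000 on such a configuration
//   TUint32 pageBytes  = Kern::RoundToPageSize(0x1001);     // -> 0x2000 with 4KB MMU pages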
       
  1350 
       
  1351 
       
  1352 /**
       
  1353 Allows the variant to specify the details of the RAM zones. This should be invoked 
       
  1354 by the variant in its implementation of the pure virtual function Asic::Init1().
       
  1355 
       
  1356 There are some limitations to how the RAM zones can be specified:
       
  1357 - Each RAM zone's address space must be distinct and not overlap with any 
       
  1358 other RAM zone's address space
       
   1359 - Each RAM zone's address space must have a size that is a multiple of the 
       
  1360 ASIC's MMU small page size and be aligned to the ASIC's MMU small page size, 
       
  1361 usually 4KB on ARM MMUs.
       
  1362 - When taken together all of the RAM zones must cover the whole of the physical RAM
       
  1363 address space as specified by the bootstrap in the SuperPage members iTotalRamSize
       
   1364 and iRamBootData.
       
   1365 - There can be no more than KMaxRamZones RAM zones specified by the base port.
       
  1366 
       
  1367 Note the verification of the RAM zone data is not performed here but by the ram 
       
  1368 allocator later in the boot up sequence.  This is because it is only possible to
       
  1369 verify the zone data once the physical RAM configuration has been read from 
       
  1370 the super page. Any verification errors result in a "RAM-ALLOC" panic 
       
  1371 faulting the kernel during initialisation.
       
  1372 
       
  1373 @param aZones Pointer to an array of SRamZone structs containing the details for all 
       
  1374 the zones. The end of the array is specified by an element with an iSize of zero. The array must 
       
  1375 remain in memory at least until the kernel has successfully booted.
       
  1376 
       
  1377 @param aCallback Pointer to a call back function that the kernel may invoke to request 
       
  1378 one of the operations specified by TRamZoneOp.
       
  1379 
       
  1380 @return KErrNone if successful, otherwise one of the system wide error codes
       
  1381 
       
  1382 @see TRamZoneOp
       
  1383 @see SRamZone
       
  1384 @see TRamZoneCallback
       
  1385 */
       
  1386 EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
       
  1387 	{
       
  1388 	// Ensure this is only called once and only while we are initialising the kernel
       
  1389 	if (!K::Initialising || MmuBase::RamZoneConfig != NULL)
       
  1390 		{// fault kernel, won't return
       
  1391 		K::Fault(K::EBadSetRamZoneConfig);
       
  1392 		}
       
  1393 
       
  1394 	if (NULL == aZones)
       
  1395 		{
       
  1396 		return KErrArgument;
       
  1397 		}
       
  1398 	MmuBase::RamZoneConfig=aZones;
       
  1399 	MmuBase::RamZoneCallback=aCallback;
       
  1400 	return KErrNone;
       
  1401 	}
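// Illustrative sketch (editor's addition): a base port would typically define a
// static array of SRamZone descriptors covering all of physical RAM, terminated
// by an element whose iSize is zero, and register it from its Asic::Init1()
// implementation. KRamZones, RamZoneCallbackFunc and MyAsic are hypothetical
// names; the callback must match the TRamZoneCallback type.
//
//   static const SRamZone KRamZones[] = { /* one SRamZone per zone, zero-size terminator */ };
//
//   void MyAsic::Init1()
//       {
//       TInt r = Epoc::SetRamZoneConfig(KRamZones, RamZoneCallbackFunc);
//       // KErrNone on success; a NULL zone array returns KErrArgument.
//       ...
//       }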
       
  1402 
       
  1403 
       
  1404 /**
       
  1405 Modify the specified RAM zone's flags.
       
  1406 
       
  1407 This allows the BSP or device driver to configure which type of pages, if any,
       
  1408 can be allocated into a RAM zone by the system.
       
  1409 
       
  1410 Note: updating a RAM zone's flags can result in
       
  1411 	1 - memory allocations failing despite there being enough free RAM in the system.
       
  1412 	2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone()
       
  1413 	or TRamDefragRequest::DefragRam() never succeeding.
       
  1414 
       
  1415 The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc
       
  1416 are intended to be used with this method.
       
  1417 
       
  1418 @param aId			The ID of the RAM zone to modify.
       
  1419 @param aClearMask	The bit mask to clear, each flag of which must already be set on the RAM zone.
       
  1420 @param aSetMask		The bit mask to set.
       
  1421 
       
   1422 @return KErrNone on success, KErrArgument if the RAM zone with ID aId is not found or if 
       
  1423 aSetMask contains invalid flag bits.
       
  1424 
       
  1425 @see TRamDefragRequest::EmptyRamZone()
       
  1426 @see TRamDefragRequest::ClaimRamZone()
       
  1427 @see TRamDefragRequest::DefragRam()
       
  1428 
       
  1429 @see KRamZoneFlagDiscardOnly
       
  1430 @see KRamZoneFlagMovAndDisOnly
       
  1431 @see KRamZoneFlagNoAlloc
       
  1432 */
       
  1433 EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
       
  1434 	{
       
  1435 	MmuBase& m = *MmuBase::TheMmu;
       
  1436 	MmuBase::Wait();
       
  1437 
       
  1438 	TInt ret = m.ModifyRamZoneFlags(aId, aClearMask, aSetMask);
       
  1439 
       
  1440 	MmuBase::Signal();
       
  1441 	return ret;
       
  1442 	}
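// Illustrative sketch (editor's addition): blocking all further allocation into
// a RAM zone and later re-enabling it, using the flag masks named above. The
// zone ID is hypothetical.
//
//   const TUint KMyZoneId = 3;
//   TInt r = Epoc::ModifyRamZoneFlags(KMyZoneId, 0, KRamZoneFlagNoAlloc);  // set "no allocation"
//   ...
//   r = Epoc::ModifyRamZoneFlags(KMyZoneId, KRamZoneFlagNoAlloc, 0);       // clear it again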
       
  1443 
       
  1444 TInt MmuBase::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
       
  1445 	{
       
  1446 	return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
       
  1447 	}
       
  1448 
       
  1449 
       
  1450 /**
       
  1451 Gets the current count of a particular RAM zone's pages by type.
       
  1452 
       
  1453 @param aId The ID of the RAM zone to enquire about
       
  1454 @param aPageData If successful, on return this contains the page count
       
  1455 
       
  1456 @return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or
       
  1457 one of the system wide error codes 
       
  1458 
       
  1459 @pre Calling thread must be in a critical section.
       
  1460 @pre Interrupts must be enabled.
       
  1461 @pre Kernel must be unlocked.
       
  1462 @pre No fast mutex can be held.
       
  1463 @pre Call in a thread context.
       
  1464 
       
  1465 @see SRamZonePageCount
       
  1466 */
       
  1467 EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
       
  1468 	{
       
  1469 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount");
       
  1470 
       
  1471 	MmuBase& m = *MmuBase::TheMmu;
       
  1472 	MmuBase::Wait(); // Gets RAM alloc mutex
       
  1473 
       
  1474 	TInt r = m.GetRamZonePageCount(aId, aPageData);
       
  1475 
       
  1476 	MmuBase::Signal();	
       
  1477 
       
  1478 	return r;
       
  1479 	}
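// Illustrative sketch (editor's addition): querying the page counts of a RAM
// zone from a driver thread. The zone ID is hypothetical; the call must be made
// from within a critical section.
//
//   SRamZonePageCount pageData;
//   NKern::ThreadEnterCS();
//   TInt r = Epoc::GetRamZonePageCount(3, pageData);
//   NKern::ThreadLeaveCS();
//   // On KErrNone, pageData holds the zone's page counts broken down by type.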
       
  1480 
       
  1481 TInt MmuBase::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
       
  1482 	{
       
  1483 	return iRamPageAllocator->GetZonePageCount(aId, aPageData);
       
  1484 	}
       
  1485 
       
  1486 /**
       
  1487 Replace a page of the system's execute-in-place (XIP) ROM image with a page of
       
  1488 RAM having the same contents. This RAM can subsequently be written to in order
       
  1489 to apply patches to the XIP ROM or to insert software breakpoints for debugging
       
  1490 purposes.
       
  1491 Call Epoc::FreeShadowPage() when you wish to revert to the original ROM page.
       
  1492 
       
  1493 @param	aRomAddr	The virtual address of the ROM page to be replaced.
       
  1494 @return	KErrNone if the operation completed successfully.
       
  1495 		KErrArgument if the specified address is not a valid XIP ROM address.
       
  1496 		KErrNoMemory if the operation failed due to insufficient free RAM.
       
  1497 		KErrAlreadyExists if the XIP ROM page at the specified address has
       
  1498 			already been shadowed by a RAM page.
       
  1499 
       
  1500 @pre Calling thread must be in a critical section.
       
  1501 @pre Interrupts must be enabled.
       
  1502 @pre Kernel must be unlocked.
       
  1503 @pre No fast mutex can be held.
       
  1504 @pre Call in a thread context.
       
  1505 */
       
  1506 EXPORT_C TInt Epoc::AllocShadowPage(TLinAddr aRomAddr)
       
  1507 	{
       
  1508 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocShadowPage");
       
  1509 
       
  1510 	TInt r;
       
  1511 	r=M::LockRegion(aRomAddr,1);
       
  1512 	if(r!=KErrNone && r!=KErrNotFound)
       
  1513 		return r;
       
  1514 	MmuBase& m=*MmuBase::TheMmu;
       
  1515 	MmuBase::Wait();
       
  1516 	r=m.AllocShadowPage(aRomAddr);
       
  1517 	MmuBase::Signal();
       
  1518 	if(r!=KErrNone)
       
  1519 		M::UnlockRegion(aRomAddr,1);
       
  1520 	return r;
       
  1521 	}
       
  1522 
       
  1523 /**
       
  1524 Copies data into shadow memory. Source data is presumed to be in Kernel memory.
       
  1525 
       
  1526 @param	aSrc	Data to copy from.
       
  1527 @param	aDest	Address to copy into.
       
  1528 @param	aLength	Number of bytes to copy. Maximum of 32 bytes of data can be copied.
       
   1529 

   1530 @return	KErrNone 		if the operation completed successfully.

   1531 		KErrArgument 	if any part of the destination region is not a shadow page or

   1532 						if aLength is greater than 32 bytes.
       
  1533 
       
  1534 @pre Calling thread must be in a critical section.
       
  1535 @pre Interrupts must be enabled.
       
  1536 @pre Kernel must be unlocked.
       
  1537 @pre No fast mutex can be held.
       
  1538 @pre Call in a thread context.
       
  1539 */
       
  1540 EXPORT_C TInt Epoc::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
       
  1541 	{
       
  1542 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::CopyToShadowMemory");
       
  1543 
       
  1544 	if (aLength>32)
       
  1545 		return KErrArgument;
       
  1546 	MmuBase& m=*MmuBase::TheMmu;
       
  1547 	// This is a simple copy operation except on platforms with __CPU_MEMORY_TYPE_REMAPPING defined,
       
   1548 	// where the shadow page is read-only and has to be remapped before it is written to.
       
  1549 	return m.CopyToShadowMemory(aDest, aSrc, aLength);
       
  1550 	}
       
  1551 /**
       
  1552 Revert an XIP ROM address which has previously been shadowed to the original
       
  1553 page of ROM.
       
  1554 
       
  1555 @param	aRomAddr	The virtual address of the ROM page to be reverted.
       
  1556 @return	KErrNone if the operation completed successfully.
       
  1557 		KErrArgument if the specified address is not a valid XIP ROM address.
       
  1558 		KErrGeneral if the specified address has not previously been shadowed
       
  1559 			using Epoc::AllocShadowPage().
       
  1560 
       
  1561 @pre Calling thread must be in a critical section.
       
  1562 @pre Interrupts must be enabled.
       
  1563 @pre Kernel must be unlocked.
       
  1564 @pre No fast mutex can be held.
       
  1565 @pre Call in a thread context.
       
  1566 */
       
  1567 EXPORT_C TInt Epoc::FreeShadowPage(TLinAddr aRomAddr)
       
  1568 	{
       
  1569 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreeShadowPage");
       
  1570 	MmuBase& m=*MmuBase::TheMmu;
       
  1571 	MmuBase::Wait();
       
  1572 	TInt r=m.FreeShadowPage(aRomAddr);
       
  1573 	MmuBase::Signal();
       
  1574 	if(r==KErrNone)
       
  1575 		M::UnlockRegion(aRomAddr,1);
       
  1576 	return r;
       
  1577 	}
       
  1578 
       
  1579 
       
  1580 /**
       
  1581 Change the permissions on an XIP ROM address which has previously been shadowed
       
  1582 by a RAM page so that the RAM page may no longer be written to.
       
  1583 
       
   1584 Note: On the latest platforms (those that use the reduced set of access permissions:

   1585 arm11mpcore, arm1176, cortex) the shadow page is implemented with read-only permissions.

   1586 Therefore, calling this function is not necessary, as the shadow page is already created 'frozen'.
       
  1587 
       
  1588 @param	aRomAddr	The virtual address of the shadow RAM page to be frozen.
       
  1589 @return	KErrNone if the operation completed successfully.
       
  1590 		KErrArgument if the specified address is not a valid XIP ROM address.
       
  1591 		KErrGeneral if the specified address has not previously been shadowed
       
  1592 			using Epoc::AllocShadowPage().
       
  1593 
       
  1594 @pre Calling thread must be in a critical section.
       
  1595 @pre Interrupts must be enabled.
       
  1596 @pre Kernel must be unlocked.
       
  1597 @pre No fast mutex can be held.
       
  1598 @pre Call in a thread context.
       
  1599 */
       
  1600 EXPORT_C TInt Epoc::FreezeShadowPage(TLinAddr aRomAddr)
       
  1601 	{
       
  1602 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreezeShadowPage");
       
  1603 	MmuBase& m=*MmuBase::TheMmu;
       
  1604 	MmuBase::Wait();
       
  1605 	TInt r=m.FreezeShadowPage(aRomAddr);
       
  1606 	MmuBase::Signal();
       
  1607 	return r;
       
  1608 	}
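// Illustrative sketch (editor's addition): the typical shadow-page sequence, for
// example as used by a debugger to patch a word of XIP ROM. romAddr and newData
// are hypothetical; all calls are made from within a critical section.
//
//   TLinAddr romAddr = /* XIP ROM address to patch */;
//   TUint32 newData = /* replacement word */;
//   NKern::ThreadEnterCS();
//   TInt r = Epoc::AllocShadowPage(romAddr);           // replace the ROM page with writable RAM
//   if (r == KErrNone)
//       {
//       r = Epoc::CopyToShadowMemory(romAddr, (TLinAddr)&newData, sizeof(newData));
//       // optionally write-protect the shadow page again:
//       // Epoc::FreezeShadowPage(romAddr);
//       }
//   ...
//   Epoc::FreeShadowPage(romAddr);                     // revert to the original ROM page
//   NKern::ThreadLeaveCS();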
       
  1609 
       
  1610 
       
  1611 /**
       
  1612 Allocate a block of physically contiguous RAM with a physical address aligned
       
  1613 to a specified power of 2 boundary.
       
  1614 When the RAM is no longer required it should be freed using
       
  1615 Epoc::FreePhysicalRam()
       
  1616 
       
  1617 @param	aSize		The size in bytes of the required block. The specified size
       
  1618 					is rounded up to the page size, since only whole pages of
       
  1619 					physical RAM can be allocated.
       
  1620 @param	aPhysAddr	Receives the physical address of the base of the block on
       
  1621 					successful allocation.
       
  1622 @param	aAlign		Specifies the number of least significant bits of the
       
  1623 					physical address which are required to be zero. If a value
       
  1624 					less than log2(page size) is specified, page alignment is
       
  1625 					assumed. Pass 0 for aAlign if there are no special alignment
       
  1626 					constraints (other than page alignment).
       
  1627 @return	KErrNone if the allocation was successful.
       
  1628 		KErrNoMemory if a sufficiently large physically contiguous block of free
       
  1629 		RAM	with the specified alignment could not be found.
       
  1630 @pre Calling thread must be in a critical section.
       
  1631 @pre Interrupts must be enabled.
       
  1632 @pre Kernel must be unlocked.
       
  1633 @pre No fast mutex can be held.
       
  1634 @pre Call in a thread context.
       
  1635 @pre Can be used in a device driver.
       
  1636 */
       
  1637 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
       
  1638 	{
       
  1639 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam");
       
  1640 	MmuBase& m=*MmuBase::TheMmu;
       
  1641 	MmuBase::Wait();
       
  1642 	TInt r=m.AllocPhysicalRam(aSize,aPhysAddr,aAlign);
       
  1643 	if (r == KErrNone)
       
  1644 		{
       
  1645 		// For the sake of platform security we have to clear the memory. E.g. the driver
       
  1646 		// could assign it to a chunk visible to user side.
       
  1647 		m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
       
  1648 #ifdef BTRACE_KERNEL_MEMORY
       
  1649 		TUint size = Kern::RoundToPageSize(aSize);
       
  1650 		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
       
  1651 		Epoc::DriverAllocdPhysRam += size;
       
  1652 #endif
       
  1653 		}
       
  1654 	MmuBase::Signal();
       
  1655 	return r;
       
  1656 	}
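// Illustrative sketch (editor's addition): a device driver allocating a
// physically contiguous, 64KB-aligned buffer for DMA and freeing it again.
// The size and alignment are hypothetical.
//
//   TPhysAddr physAddr;
//   NKern::ThreadEnterCS();
//   TInt r = Epoc::AllocPhysicalRam(0x10000, physAddr, 16);   // 64KB, aligned to 2^16
//   if (r == KErrNone)
//       {
//       // ... map physAddr with a hardware or shared chunk and perform the DMA ...
//       Epoc::FreePhysicalRam(physAddr, 0x10000);
//       }
//   NKern::ThreadLeaveCS();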
       
  1657 
       
  1658 /**
       
  1659 Allocate a block of physically contiguous RAM with a physical address aligned
       
  1660 to a specified power of 2 boundary from the specified zone.
       
  1661 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
  1662 
       
   1663 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt

   1664 to allocate regardless of whether the other flags are set for the specified RAM zone 

   1665 or not.
       
  1668 
       
  1669 @param 	aZoneId		The ID of the zone to attempt to allocate from.
       
  1670 @param	aSize		The size in bytes of the required block. The specified size
       
  1671 					is rounded up to the page size, since only whole pages of
       
  1672 					physical RAM can be allocated.
       
  1673 @param	aPhysAddr	Receives the physical address of the base of the block on
       
  1674 					successful allocation.
       
  1675 @param	aAlign		Specifies the number of least significant bits of the
       
  1676 					physical address which are required to be zero. If a value
       
  1677 					less than log2(page size) is specified, page alignment is
       
  1678 					assumed. Pass 0 for aAlign if there are no special alignment
       
  1679 					constraints (other than page alignment).
       
  1680 @return	KErrNone if the allocation was successful.
       
  1681 		KErrNoMemory if a sufficiently large physically contiguous block of free
       
  1682 		RAM	with the specified alignment could not be found within the specified 
       
  1683 		zone.
       
  1684 		KErrArgument if a RAM zone of the specified ID can't be found or if the
       
  1685 		RAM zone has a total number of physical pages which is less than those 
       
  1686 		requested for the allocation.
       
  1687 
       
  1688 @pre Calling thread must be in a critical section.
       
  1689 @pre Interrupts must be enabled.
       
  1690 @pre Kernel must be unlocked.
       
  1691 @pre No fast mutex can be held.
       
  1692 @pre Call in a thread context.
       
  1693 @pre Can be used in a device driver.
       
  1694 */
       
  1695 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
       
  1696 	{
       
  1697 	return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign);
       
  1698 	}
       
  1699 
       
  1700 
       
  1701 /**
       
  1702 Allocate a block of physically contiguous RAM with a physical address aligned
       
  1703 to a specified power of 2 boundary from the specified RAM zones.
       
  1704 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
  1705 
       
  1706 RAM will be allocated into the RAM zones in the order they are specified in the 
       
  1707 aZoneIdList parameter. If the contiguous allocations are intended to span RAM zones 
       
  1708 when required then aZoneIdList should be listed with the RAM zones in ascending 
       
  1709 physical address order.
       
  1710 
       
   1711 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt

   1712 to allocate regardless of whether the other flags are set for the specified RAM zones 

   1713 or not.
       
  1716 
       
  1717 @param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to 
       
  1718 					attempt to allocate from.
       
  1719 @param 	aZoneIdCount The number of RAM zone IDs contained in aZoneIdList.
       
  1720 @param	aSize		The size in bytes of the required block. The specified size
       
  1721 					is rounded up to the page size, since only whole pages of
       
  1722 					physical RAM can be allocated.
       
  1723 @param	aPhysAddr	Receives the physical address of the base of the block on
       
  1724 					successful allocation.
       
  1725 @param	aAlign		Specifies the number of least significant bits of the
       
  1726 					physical address which are required to be zero. If a value
       
  1727 					less than log2(page size) is specified, page alignment is
       
  1728 					assumed. Pass 0 for aAlign if there are no special alignment
       
  1729 					constraints (other than page alignment).
       
  1730 @return	KErrNone if the allocation was successful.
       
  1731 		KErrNoMemory if a sufficiently large physically contiguous block of free
       
  1732 		RAM	with the specified alignment could not be found within the specified 
       
  1733 		zone.
       
  1734 		KErrArgument if a RAM zone of a specified ID can't be found or if the
       
  1735 		RAM zones have a total number of physical pages which is less than those 
       
  1736 		requested for the allocation.
       
  1737 
       
  1738 @pre Calling thread must be in a critical section.
       
  1739 @pre Interrupts must be enabled.
       
  1740 @pre Kernel must be unlocked.
       
  1741 @pre No fast mutex can be held.
       
  1742 @pre Call in a thread context.
       
  1743 @pre Can be used in a device driver.
       
  1744 */
       
  1745 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
       
  1746 	{
       
  1747 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam");
       
  1748 	MmuBase& m=*MmuBase::TheMmu;
       
  1749 	MmuBase::Wait();
       
  1750 	TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
       
  1751 	if (r == KErrNone)
       
  1752 		{
       
  1753 		// For the sake of platform security we have to clear the memory. E.g. the driver
       
  1754 		// could assign it to a chunk visible to user side.
       
  1755 		m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
       
  1756 #ifdef BTRACE_KERNEL_MEMORY
       
  1757 		TUint size = Kern::RoundToPageSize(aSize);
       
  1758 		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
       
  1759 		Epoc::DriverAllocdPhysRam += size;
       
  1760 #endif
       
  1761 		}
       
  1762 	MmuBase::Signal();
       
  1763 	return r;
       
  1764 	}
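// Illustrative sketch (editor's addition): attempting a contiguous allocation
// from a preferred list of RAM zones, listed in ascending physical address
// order so that the block may span adjacent zones. The zone IDs are hypothetical.
//
//   TUint zones[] = { 1, 2 };
//   TPhysAddr physAddr;
//   NKern::ThreadEnterCS();
//   TInt r = Epoc::ZoneAllocPhysicalRam(zones, 2, 0x8000, physAddr, 0);   // 32KB, page-aligned
//   NKern::ThreadLeaveCS();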
       
  1765 
       
  1766 
       
  1767 /**
       
  1768 Attempt to allocate discontiguous RAM pages.
       
  1769 
       
  1770 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
  1771 
       
  1772 @param	aNumPages	The number of discontiguous pages required to be allocated
       
  1773 @param	aPageList	This should be a pointer to a previously allocated array of
       
   1774 					aNumPages TPhysAddr elements.  On a successful allocation it 
       
  1775 					will receive the physical addresses of each page allocated.
       
  1776 
       
  1777 @return	KErrNone if the allocation was successful.
       
  1778 		KErrNoMemory if the requested number of pages can't be allocated
       
  1779 
       
  1780 @pre Calling thread must be in a critical section.
       
  1781 @pre Interrupts must be enabled.
       
  1782 @pre Kernel must be unlocked.
       
  1783 @pre No fast mutex can be held.
       
  1784 @pre Call in a thread context.
       
  1785 @pre Can be used in a device driver.
       
  1786 */
       
  1787 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
       
  1788 	{
       
  1789 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam");
       
  1790 	MmuBase& m = *MmuBase::TheMmu;
       
  1791 	MmuBase::Wait();
       
  1792 	TInt r = m.AllocPhysicalRam(aNumPages, aPageList);
       
  1793 	if (r == KErrNone)
       
  1794 		{
       
  1795 		// For the sake of platform security we have to clear the memory. E.g. the driver
       
  1796 		// could assign it to a chunk visible to user side.
       
  1797 		m.ClearPages(aNumPages, aPageList);
       
  1798 
       
  1799 #ifdef BTRACE_KERNEL_MEMORY
       
  1800 		if (BTrace::CheckFilter(BTrace::EKernelMemory))
       
  1801 			{// Only loop round each page if EKernelMemory tracing is enabled
       
  1802 			TPhysAddr* pAddr = aPageList;
       
  1803 			TPhysAddr* pAddrEnd = aPageList + aNumPages;
       
  1804 			while (pAddr < pAddrEnd)
       
  1805 				{
       
  1806 				BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
       
  1807 				Epoc::DriverAllocdPhysRam += KPageSize;
       
  1808 				}
       
  1809 			}
       
  1810 #endif
       
  1811 		}
       
  1812 	MmuBase::Signal();
       
  1813 	return r;
       
  1814 	}
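// Illustrative sketch (editor's addition): allocating a set of discontiguous
// pages into a caller-supplied address array and freeing them again with the
// matching page-list overload of Epoc::FreePhysicalRam(). The page count is
// hypothetical.
//
//   const TInt KNumPages = 8;
//   TPhysAddr pages[KNumPages];
//   NKern::ThreadEnterCS();
//   TInt r = Epoc::AllocPhysicalRam(KNumPages, pages);
//   if (r == KErrNone)
//       {
//       // ... use the pages ...
//       Epoc::FreePhysicalRam(KNumPages, pages);
//       }
//   NKern::ThreadLeaveCS();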
       
  1815 
       
  1816 
       
  1817 /**
       
  1818 Attempt to allocate discontiguous RAM pages from the specified zone.
       
  1819 
       
   1820 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
       
  1821 to allocate regardless of whether the other flags are set for the specified RAM zones 
       
  1822 or not.
       
  1823 
       
  1824 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
  1825 
       
  1826 @param 	aZoneId		The ID of the zone to attempt to allocate from.
       
  1827 @param	aNumPages	The number of discontiguous pages required to be allocated 
       
  1828 					from the specified zone.
       
  1829 @param	aPageList	This should be a pointer to a previously allocated array of
       
   1830 					aNumPages TPhysAddr elements.  On a successful 
       
  1831 					allocation it will receive the physical addresses of each 
       
  1832 					page allocated.
       
  1833 @return	KErrNone if the allocation was successful.
       
  1834 		KErrNoMemory if the requested number of pages can't be allocated from the 
       
  1835 		specified zone.
       
  1836 		KErrArgument if a RAM zone of the specified ID can't be found or if the
       
  1837 		RAM zone has a total number of physical pages which is less than those 
       
  1838 		requested for the allocation.
       
  1839 
       
  1840 @pre Calling thread must be in a critical section.
       
  1841 @pre Interrupts must be enabled.
       
  1842 @pre Kernel must be unlocked.
       
  1843 @pre No fast mutex can be held.
       
  1844 @pre Call in a thread context.
       
  1845 @pre Can be used in a device driver.
       
  1846 */
       
  1847 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList)
       
  1848 	{
       
  1849 	return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList);
       
  1850 	}
       
  1851 
       
  1852 
       
  1853 /**
       
  1854 Attempt to allocate discontiguous RAM pages from the specified RAM zones.
       
  1855 The RAM pages will be allocated into the RAM zones in the order that they are specified 
       
  1856 in the aZoneIdList parameter, the RAM zone preferences will be ignored.
       
  1857 
       
   1858 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
       
  1859 to allocate regardless of whether the other flags are set for the specified RAM zones 
       
  1860 or not.
       
  1861 
       
  1862 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
       
  1863 
       
  1864 @param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to 
       
  1865 					attempt to allocate from.
       
  1866 @param	aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList.
       
  1867 @param	aNumPages	The number of discontiguous pages required to be allocated 
       
  1868 					from the specified zone.
       
  1869 @param	aPageList	This should be a pointer to a previously allocated array of
       
   1870 					aNumPages TPhysAddr elements.  On a successful 
       
  1871 					allocation it will receive the physical addresses of each 
       
  1872 					page allocated.
       
  1873 @return	KErrNone if the allocation was successful.
       
  1874 		KErrNoMemory if the requested number of pages can't be allocated from the 
       
  1875 		specified zone.
       
  1876 		KErrArgument if a RAM zone of a specified ID can't be found or if the
       
  1877 		RAM zones have a total number of physical pages which is less than those 
       
  1878 		requested for the allocation.
       
  1879 
       
  1880 @pre Calling thread must be in a critical section.
       
  1881 @pre Interrupts must be enabled.
       
  1882 @pre Kernel must be unlocked.
       
  1883 @pre No fast mutex can be held.
       
  1884 @pre Call in a thread context.
       
  1885 @pre Can be used in a device driver.
       
  1886 */
       
  1887 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
       
  1888 	{
       
  1889 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam");
       
  1890 	MmuBase& m = *MmuBase::TheMmu;
       
  1891 	MmuBase::Wait();
       
  1892 	TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList);
       
  1893 	if (r == KErrNone)
       
  1894 		{
       
  1895 		// For the sake of platform security we have to clear the memory. E.g. the driver
       
  1896 		// could assign it to a chunk visible to user side.
       
  1897 		m.ClearPages(aNumPages, aPageList);
       
  1898 
       
  1899 #ifdef BTRACE_KERNEL_MEMORY
       
  1900 		if (BTrace::CheckFilter(BTrace::EKernelMemory))
       
  1901 			{// Only loop round each page if EKernelMemory tracing is enabled
       
  1902 			TPhysAddr* pAddr = aPageList;
       
  1903 			TPhysAddr* pAddrEnd = aPageList + aNumPages;
       
  1904 			while (pAddr < pAddrEnd)
       
  1905 				{
       
  1906 				BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
       
  1907 				Epoc::DriverAllocdPhysRam += KPageSize;
       
  1908 				}
       
  1909 			}
       
  1910 #endif
       
  1911 		}
       
  1912 	MmuBase::Signal();
       
  1913 	return r;
       
  1914 	}
       
  1915 
       
  1916 /**
       
  1917 Free a previously-allocated block of physically contiguous RAM.
       
  1918 
       
  1919 Specifying one of the following may cause the system to panic: 
       
  1920 a) an invalid physical RAM address.
       
  1921 b) valid physical RAM addresses where some had not been previously allocated.
       
   1922 c) an address not aligned to a page boundary.
       
  1923 
       
  1924 @param	aPhysAddr	The physical address of the base of the block to be freed.
       
  1925 					This must be the address returned by a previous call to
       
  1926 					Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(), 
       
  1927 					Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone().
       
   1928 @param	aSize		The size in bytes of the block to be freed. The specified size

   1929 					is rounded up to the page size, since only whole pages of

   1930 					physical RAM can be freed.
       
  1931 @return	KErrNone if the operation was successful.
       
  1932 
       
  1933 
       
  1934 
       
  1935 @pre Calling thread must be in a critical section.
       
  1936 @pre Interrupts must be enabled.
       
  1937 @pre Kernel must be unlocked.
       
  1938 @pre No fast mutex can be held.
       
  1939 @pre Call in a thread context.
       
  1940 @pre Can be used in a device driver.
       
  1941 */
       
  1942 EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
       
  1943 	{
       
  1944 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
       
  1945 	MmuBase& m=*MmuBase::TheMmu;
       
  1946 	MmuBase::Wait();
       
  1947 	TInt r=m.FreePhysicalRam(aPhysAddr,aSize);
       
  1948 #ifdef BTRACE_KERNEL_MEMORY
       
  1949 	if (r == KErrNone)
       
  1950 		{
       
  1951 		TUint size = Kern::RoundToPageSize(aSize);
       
  1952 		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, size, aPhysAddr);
       
  1953 		Epoc::DriverAllocdPhysRam -= size;
       
  1954 		}
       
  1955 #endif
       
  1956 	MmuBase::Signal();
       
  1957 	return r;
       
  1958 	}
       
  1959 
       
  1960 
       
  1961 /**
       
  1962 Free a number of physical RAM pages that were previously allocated using
       
  1963 Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam().
       
  1964 
       
  1965 Specifying one of the following may cause the system to panic: 
       
  1966 a) an invalid physical RAM address.
       
  1967 b) valid physical RAM addresses where some had not been previously allocated.
       
   1968 c) an address not aligned to a page boundary.
       
  1969 
       
  1970 @param	aNumPages	The number of pages to be freed.
       
   1971 @param	aPageList	An array of aNumPages TPhysAddr elements, where each element

   1972 					contains the physical address of a page to be freed.
       
  1973 					This must be the same set of addresses as those returned by a 
       
  1974 					previous call to Epoc::AllocPhysicalRam() or 
       
  1975 					Epoc::ZoneAllocPhysicalRam().
       
  1976 @return	KErrNone if the operation was successful.
       
  1977   
       
  1978 @pre Calling thread must be in a critical section.
       
  1979 @pre Interrupts must be enabled.
       
  1980 @pre Kernel must be unlocked.
       
  1981 @pre No fast mutex can be held.
       
  1982 @pre Call in a thread context.
       
  1983 @pre Can be used in a device driver.
       
  1984 		
       
  1985 */
       
  1986 EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
       
  1987 	{
       
  1988 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
       
  1989 	MmuBase& m=*MmuBase::TheMmu;
       
  1990 	MmuBase::Wait();
       
  1991 	TInt r=m.FreePhysicalRam(aNumPages, aPageList);
       
  1992 #ifdef BTRACE_KERNEL_MEMORY
       
  1993 	if (r == KErrNone && BTrace::CheckFilter(BTrace::EKernelMemory))
       
  1994 		{// Only loop round each page if EKernelMemory tracing is enabled
       
  1995 		TPhysAddr* pAddr = aPageList;
       
  1996 		TPhysAddr* pAddrEnd = aPageList + aNumPages;
       
  1997 		while (pAddr < pAddrEnd)
       
  1998 			{
       
  1999 			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pAddr++);
       
  2000 			Epoc::DriverAllocdPhysRam -= KPageSize;
       
  2001 			}
       
  2002 		}
       
  2003 #endif
       
  2004 	MmuBase::Signal();
       
  2005 	return r;
       
  2006 	}
       
  2007 
       
  2008 
       
  2009 /**
       
  2010 Allocate a specific block of physically contiguous RAM, specified by physical
       
  2011 base address and size.
       
  2012 If and when the RAM is no longer required it should be freed using
       
  2013 Epoc::FreePhysicalRam()
       
  2014 
       
  2015 @param	aPhysAddr	The physical address of the base of the required block.
       
  2016 @param	aSize		The size in bytes of the required block. The specified size
       
  2017 					is rounded up to the page size, since only whole pages of
       
  2018 					physical RAM can be allocated.
       
  2019 @return	KErrNone if the operation was successful.
       
  2020 		KErrArgument if the range of physical addresses specified included some
       
  2021 					which are not valid physical RAM addresses.
       
  2022 		KErrInUse	if the range of physical addresses specified are all valid
       
  2023 					physical RAM addresses but some of them have already been
       
  2024 					allocated for other purposes.
       
  2025 @pre Calling thread must be in a critical section.
       
  2026 @pre Interrupts must be enabled.
       
  2027 @pre Kernel must be unlocked.
       
  2028 @pre No fast mutex can be held.
       
  2029 @pre Call in a thread context.
       
  2030 @pre Can be used in a device driver.
       
  2031 */
       
  2032 EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
       
  2033 	{
       
  2034 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam");
       
  2035 	MmuBase& m=*MmuBase::TheMmu;
       
  2036 	MmuBase::Wait();
       
  2037 	TInt r=m.ClaimPhysicalRam(aPhysAddr,aSize);
       
  2038 #ifdef BTRACE_KERNEL_MEMORY
       
  2039 	if(r==KErrNone)
       
  2040 		{
       
  2041 		TUint32 pa=aPhysAddr;
       
  2042 		TUint32 size=aSize;
       
  2043 		m.RoundUpRangeToPageSize(pa,size);
       
  2044 		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, pa);
       
  2045 		Epoc::DriverAllocdPhysRam += size;
       
  2046 		}
       
  2047 #endif
       
  2048 	MmuBase::Signal();
       
  2049 	return r;
       
  2050 	}
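// Illustrative sketch (editor's addition): claiming a fixed physical region that
// the bootstrap left unused, for example a frame buffer at a known address.
// The address and size are hypothetical.
//
//   NKern::ThreadEnterCS();
//   TInt r = Epoc::ClaimPhysicalRam(0x40000000, 0x00100000);   // claim 1MB at 0x40000000
//   if (r == KErrNone)
//       {
//       // ... map and use the region; release it later with Epoc::FreePhysicalRam() ...
//       }
//   NKern::ThreadLeaveCS();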
       
  2051 
       
  2052 
       
  2053 /**
       
  2054 Translate a virtual address to the corresponding physical address.
       
  2055 
       
  2056 @param	aLinAddr	The virtual address to be translated.
       
  2057 @return	The physical address corresponding to the given virtual address, or
       
  2058 		KPhysAddrInvalid if the specified virtual address is unmapped.
       
  2059 @pre Interrupts must be enabled.
       
  2060 @pre Kernel must be unlocked.
       
  2061 @pre Call in a thread context.
       
  2062 @pre Can be used in a device driver.
       
  2063 @pre Hold system lock if there is any possibility that the virtual address is
       
  2064 		unmapped, may become unmapped, or may be remapped during the operation.
       
  2065 	This will potentially be the case unless the virtual address refers to a
       
  2066 	hardware chunk or shared chunk under the control of the driver calling this
       
  2067 	function.
       
  2068 */
       
  2069 EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
       
  2070 	{
       
  2071 //	This precondition is violated by various parts of the system under some conditions,
       
  2072 //	e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
       
  2073 //	a higher-level RTOS for which these conditions are meaningless. Thus, it's been
       
  2074 //	disabled for now.
       
  2075 //	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");
       
  2076 	MmuBase& m=*MmuBase::TheMmu;
       
  2077 	TPhysAddr pa=m.LinearToPhysical(aLinAddr);
       
  2078 	return pa;
       
  2079 	}
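// Illustrative sketch (editor's addition): translating the virtual address of a
// buffer inside a driver-owned shared chunk, so no system lock is needed per the
// precondition above. The variable name is hypothetical.
//
//   TPhysAddr pa = Epoc::LinearToPhysical(chunkBufferLinAddr);
//   if (pa == KPhysAddrInvalid)
//       {
//       // the address is not currently mapped
//       }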
       
  2080 
       
  2081 
       
  2082 EXPORT_C TInt TInternalRamDrive::MaxSize()
       
  2083 	{
       
  2084 	return TheSuperPage().iRamDriveSize+Kern::FreeRamInBytes();
       
  2085 	}
       
  2086 
       
  2087 
       
  2088 /******************************************************************************
       
  2089  * Address allocator
       
  2090  ******************************************************************************/
       
  2091 TLinearSection* TLinearSection::New(TLinAddr aBase, TLinAddr aEnd)
       
  2092 	{
       
  2093 	__KTRACE_OPT(KMMU,Kern::Printf("TLinearSection::New(%08x,%08x)", aBase, aEnd));
       
  2094 	MmuBase& m=*MmuBase::TheMmu;
       
  2095 	TUint npdes=(aEnd-aBase)>>m.iChunkShift;
       
  2096 	TInt nmapw=(npdes+31)>>5;
       
  2097 	TInt memsz=sizeof(TLinearSection)+(nmapw-1)*sizeof(TUint32);
       
  2098 	TLinearSection* p=(TLinearSection*)Kern::Alloc(memsz);
       
  2099 	if (p)
       
  2100 		{
       
  2101 		new(&p->iAllocator) TBitMapAllocator(npdes, ETrue);
       
  2102 		p->iBase=aBase;
       
  2103 		p->iEnd=aEnd;
       
  2104 		}
       
  2105 	__KTRACE_OPT(KMMU,Kern::Printf("TLinearSection at %08x", p));
       
  2106 	return p;
       
  2107 	}
       
  2108 
       
  2109 /******************************************************************************
       
  2110  * Address allocator for HW chunks
       
  2111  ******************************************************************************/
       
  2112 THwChunkPageTable::THwChunkPageTable(TInt aIndex, TInt aSize, TPde aPdePerm)
       
  2113 	:	THwChunkRegion(aIndex, 0, aPdePerm),
       
  2114 		iAllocator(aSize, ETrue)
       
  2115 	{
       
  2116 	}
       
  2117 
       
  2118 THwChunkPageTable* THwChunkPageTable::New(TInt aIndex, TPde aPdePerm)
       
  2119 	{
       
  2120 	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable::New(%03x,%08x)",aIndex,aPdePerm));
       
  2121 	MmuBase& m=*MmuBase::TheMmu;
       
  2122 	TInt pdepages=m.iChunkSize>>m.iPageShift;
       
  2123 	TInt nmapw=(pdepages+31)>>5;
       
  2124 	TInt memsz=sizeof(THwChunkPageTable)+(nmapw-1)*sizeof(TUint32);
       
  2125 	THwChunkPageTable* p=(THwChunkPageTable*)Kern::Alloc(memsz);
       
  2126 	if (p)
       
  2127 		new (p) THwChunkPageTable(aIndex, pdepages, aPdePerm);
       
  2128 	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable at %08x",p));
       
  2129 	return p;
       
  2130 	}
       
  2131 
       
  2132 THwChunkAddressAllocator::THwChunkAddressAllocator()
       
  2133 	{
       
  2134 	}
       
  2135 
       
  2136 THwChunkAddressAllocator* THwChunkAddressAllocator::New(TInt aAlign, TLinearSection* aSection)
       
  2137 	{
       
  2138 	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator::New(%d,%08x)",aAlign,aSection));
       
  2139 	THwChunkAddressAllocator* p=new THwChunkAddressAllocator;
       
  2140 	if (p)
       
  2141 		{
       
  2142 		p->iAlign=aAlign;
       
  2143 		p->iSection=aSection;
       
  2144 		}
       
  2145 	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator at %08x",p));
       
  2146 	return p;
       
  2147 	}
       
  2148 
       
  2149 THwChunkRegion* THwChunkAddressAllocator::NewRegion(TInt aIndex, TInt aSize, TPde aPdePerm)
       
  2150 	{
       
  2151 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion(index=%x, size=%x, pde=%08x)",aIndex,aSize,aPdePerm));
       
  2152 	THwChunkRegion* p=new THwChunkRegion(aIndex, aSize, aPdePerm);
       
  2153 	if (p)
       
  2154 		{
       
  2155 		TInt r=InsertInOrder(p, Order);
       
  2156 		__KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
       
  2157 		if (r<0)
       
  2158 			delete p, p=NULL;
       
  2159 		}
       
  2160 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion ret %08x)",p));
       
  2161 	return p;
       
  2162 	}
       
  2163 
       
  2164 THwChunkPageTable* THwChunkAddressAllocator::NewPageTable(TInt aIndex, TPde aPdePerm, TInt aInitB, TInt aInitC)
       
  2165 	{
       
  2166 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable(index=%x, pde=%08x, iB=%d, iC=%d)",aIndex,aPdePerm,aInitB,aInitC));
       
  2167 	THwChunkPageTable* p=THwChunkPageTable::New(aIndex, aPdePerm);
       
  2168 	if (p)
       
  2169 		{
       
  2170 		TInt r=InsertInOrder(p, Order);
       
  2171 		__KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
       
  2172 		if (r<0)
       
  2173 			delete p, p=NULL;
       
  2174 		else
       
  2175 			p->iAllocator.Alloc(aInitB, aInitC);
       
  2176 		}
       
  2177 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable ret %08x)",p));
       
  2178 	return p;
       
  2179 	}
       
  2180 
       
  2181 TLinAddr THwChunkAddressAllocator::SearchExisting(TInt aNumPages, TInt aPageAlign, TInt aPageOffset, TPde aPdePerm)
       
  2182 	{
       
  2183 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx np=%03x align=%d offset=%03x pdeperm=%08x",
       
  2184 				aNumPages, aPageAlign, aPageOffset, aPdePerm));
       
  2185 	TInt c=Count();
       
  2186 	if (c==0)
       
  2187 		return 0;	// don't try to access [0] if array empty!
       
  2188 	THwChunkPageTable** pp=(THwChunkPageTable**)&(*this)[0];
       
  2189 	THwChunkPageTable** ppE=pp+c;
       
  2190 	while(pp<ppE)
       
  2191 		{
       
  2192 		THwChunkPageTable* p=*pp++;
       
  2193 		if (p->iRegionSize!=0 || p->iPdePerm!=aPdePerm)
       
  2194 			continue;	// if not page table or PDE permissions wrong, we can't use it
       
  2195 		TInt r=p->iAllocator.AllocAligned(aNumPages, aPageAlign, -aPageOffset, EFalse);
       
  2196 		__KTRACE_OPT(KMMU, Kern::Printf("r=%d", r));
       
  2197 		if (r<0)
       
  2198 			continue;	// not enough space in this page table
       
  2199 		
       
  2200 		// got enough space in existing page table, so use it
       
  2201 		p->iAllocator.Alloc(r, aNumPages);
       
  2202 		MmuBase& m=*MmuBase::TheMmu;
       
  2203 		TLinAddr a = iSection->iBase + (TLinAddr(p->iIndex)<<m.iChunkShift) + (r<<m.iPageShift);
       
  2204 		__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx OK, returning %08x", a));
       
  2205 		return a;
       
  2206 		}
       
  2207 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx not found"));
       
  2208 	return 0;
       
  2209 	}
       
  2210 
       
  2211 TLinAddr THwChunkAddressAllocator::Alloc(TInt aSize, TInt aAlign, TInt aOffset, TPde aPdePerm)
       
  2212 	{
       
  2213 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc size=%08x align=%d offset=%08x pdeperm=%08x",
       
  2214 				aSize, aAlign, aOffset, aPdePerm));
       
  2215 	MmuBase& m=*MmuBase::TheMmu;
       
  2216 	TInt npages=(aSize+m.iPageMask)>>m.iPageShift;
       
  2217 	TInt align=Max(aAlign,iAlign);
       
  2218 	if (align>m.iChunkShift)
       
  2219 		return 0;
       
  2220 	TInt aligns=1<<align;
       
  2221 	TInt alignm=aligns-1;
       
  2222 	TInt offset=(aOffset&alignm)>>m.iPageShift;
       
  2223 	TInt pdepages=m.iChunkSize>>m.iPageShift;
       
  2224 	TInt pdepageshift=m.iChunkShift-m.iPageShift;
       
  2225 	MmuBase::WaitHwChunk();
       
  2226 	if (npages<pdepages)
       
  2227 		{
       
  2228 		// for small regions, first try to share an existing page table
       
  2229 		TLinAddr a=SearchExisting(npages, align-m.iPageShift, offset, aPdePerm);
       
  2230 		if (a)
       
  2231 			{
       
  2232 			MmuBase::SignalHwChunk();
       
  2233 			return a;
       
  2234 			}
       
  2235 		}
       
  2236 
       
  2237 	// large region or no free space in existing page tables - allocate whole PDEs
       
  2238 	TInt npdes=(npages+offset+pdepages-1)>>pdepageshift;
       
  2239 	__KTRACE_OPT(KMMU, Kern::Printf("Allocate %d PDEs", npdes));
       
  2240 	MmuBase::Wait();
       
  2241 	TInt ix=iSection->iAllocator.AllocConsecutive(npdes, EFalse);
       
  2242 	if (ix>=0)
       
  2243 		iSection->iAllocator.Alloc(ix, npdes);
       
  2244 	MmuBase::Signal();
       
  2245 	TLinAddr a=0;
       
  2246 	if (ix>=0)
       
  2247 		a = iSection->iBase + (TLinAddr(ix)<<m.iChunkShift) + (TLinAddr(offset)<<m.iPageShift);
       
  2248 
       
  2249 	// Create bitmaps for each page table and placeholders for section blocks.
       
  2250 	// We only create a bitmap for the first and last PDE and then only if they are not
       
  2251 	// fully occupied by this request
       
  2252 	THwChunkPageTable* first=NULL;
       
  2253 	THwChunkRegion* middle=NULL;
       
  2254 	TInt remain=npages;
       
  2255 	TInt nix=ix;
       
  2256 	if (a && (offset || npages<pdepages))
       
  2257 		{
       
  2258 		// first PDE is bitmap
       
  2259 		TInt first_count = Min(remain, pdepages-offset);
       
  2260 		first=NewPageTable(nix, aPdePerm, offset, first_count);
       
  2261 		++nix;
       
  2262 		remain -= first_count;
       
  2263 		if (!first)
       
  2264 			a=0;
       
  2265 		}
       
  2266 	if (a && remain>=pdepages)
       
  2267 		{
       
  2268 		// next need whole-PDE-block placeholder
       
  2269 		TInt whole_pdes=remain>>pdepageshift;
       
  2270 		middle=NewRegion(nix, whole_pdes, aPdePerm);
       
  2271 		nix+=whole_pdes;
       
  2272 		remain-=(whole_pdes<<pdepageshift);
       
  2273 		if (!middle)
       
  2274 			a=0;
       
  2275 		}
       
  2276 	if (a && remain)
       
  2277 		{
       
  2278 		// need final bitmap section
       
  2279 		if (!NewPageTable(nix, aPdePerm, 0, remain))
       
  2280 			a=0;
       
  2281 		}
       
  2282 	if (!a)
       
  2283 		{
       
  2284 		// alloc failed somewhere - free anything we did create
       
  2285 		if (middle)
       
  2286 			Discard(middle);
       
  2287 		if (first)
       
  2288 			Discard(first);
       
  2289 		if (ix>=0)
       
  2290 			{
       
  2291 			MmuBase::Wait();
       
  2292 			iSection->iAllocator.Free(ix, npdes);
       
  2293 			MmuBase::Signal();
       
  2294 			}
       
  2295 		}
       
  2296 	MmuBase::SignalHwChunk();
       
  2297 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc returns %08x", a));
       
  2298 	return a;
       
  2299 	}
       
  2300 
       
  2301 void THwChunkAddressAllocator::Discard(THwChunkRegion* aRegion)
       
  2302 	{
       
  2303 	// remove a region from the array and destroy it
       
  2304 	TInt r=FindInOrder(aRegion, Order);
       
  2305 	if (r>=0)
       
  2306 		Remove(r);
       
  2307 	Kern::Free(aRegion);
       
  2308 	}
       
  2309 
       
  2310 TInt THwChunkAddressAllocator::Order(const THwChunkRegion& a1, const THwChunkRegion& a2)
       
  2311 	{
       
  2312 	// order two regions by address
       
  2313 	return a1.iIndex-a2.iIndex;
       
  2314 	}
       
  2315 
       
  2316 THwChunkRegion* THwChunkAddressAllocator::Free(TLinAddr aAddr, TInt aSize)
       
  2317 	{
       
  2318 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free addr=%08x size=%08x", aAddr, aSize));
       
  2319 	__ASSERT_ALWAYS(aAddr>=iSection->iBase && (aAddr+aSize)<=iSection->iEnd,
       
  2320 										MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
       
  2321 	THwChunkRegion* list=NULL;
       
  2322 	MmuBase& m=*MmuBase::TheMmu;
       
  2323 	TInt ix=(aAddr - iSection->iBase)>>m.iChunkShift;
       
  2324 	TInt remain=(aSize+m.iPageMask)>>m.iPageShift;
       
  2325 	TInt pdepageshift=m.iChunkShift-m.iPageShift;
       
  2326 	TInt offset=(aAddr&m.iChunkMask)>>m.iPageShift;
       
  2327 	THwChunkRegion find(ix, 0, 0);
       
  2328 	MmuBase::WaitHwChunk();
       
  2329 	TInt r=FindInOrder(&find, Order);
       
  2330 	__ASSERT_ALWAYS(r>=0, MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
       
  2331 	while (remain)
       
  2332 		{
       
  2333 		THwChunkPageTable* p=(THwChunkPageTable*)(*this)[r];
       
  2334 		__ASSERT_ALWAYS(p->iIndex==ix, MmuBase::Panic(MmuBase::EFreeHwChunkIndexInvalid));
       
  2335 		if (p->iRegionSize)
       
  2336 			{
       
  2337 			// multiple-whole-PDE region
       
  2338 			TInt rsz=p->iRegionSize;
       
  2339 			remain-=(rsz<<pdepageshift);
       
  2340 			Remove(r);	// r now indexes following array entry
       
  2341 			ix+=rsz;
       
  2342 			}
       
  2343 		else
       
  2344 			{
       
  2345 			// bitmap region
       
  2346 			TInt n=Min(remain, (1<<pdepageshift)-offset);
       
  2347 			p->iAllocator.Free(offset, n);
       
  2348 			remain-=n;
       
  2349 			++ix;
       
  2350 			if (p->iAllocator.iAvail < p->iAllocator.iSize)
       
  2351 				{
       
  2352 				// bitmap still in use
       
  2353 				offset=0;
       
  2354 				++r;	// r indexes following array entry
       
  2355 				continue;
       
  2356 				}
       
  2357 			Remove(r);	// r now indexes following array entry
       
  2358 			}
       
  2359 		offset=0;
       
  2360 		p->iNext=list;
       
  2361 		list=p;			// chain free region descriptors together
       
  2362 		}
       
  2363 	MmuBase::SignalHwChunk();
       
  2364 	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free returns %08x", list));
       
  2365 	return list;
       
  2366 	}
       
  2367 
       
  2368 /********************************************
       
  2369  * Hardware chunk abstraction
       
  2370  ********************************************/
       
  2371 THwChunkAddressAllocator* MmuBase::MappingRegion(TUint)
       
  2372 	{
       
  2373 	return iHwChunkAllocator;
       
  2374 	}
       
  2375 
       
  2376 TInt MmuBase::AllocateAllPageTables(TLinAddr aLinAddr, TInt aSize, TPde aPdePerm, TInt aMapShift, SPageTableInfo::TAttribs aAttrib)
       
  2377 	{
       
  2378 	__KTRACE_OPT(KMMU,Kern::Printf("AllocateAllPageTables lin=%08x, size=%x, pde=%08x, mapshift=%d attribs=%d",
       
  2379 																aLinAddr, aSize, aPdePerm, aMapShift, aAttrib));
       
  2380 	TInt offset=aLinAddr&iChunkMask;
       
  2381 	TInt remain=aSize;
       
  2382 	TLinAddr a=aLinAddr&~iChunkMask;
       
  2383 	TInt newpts=0;
       
  2384 	for (; remain>0; a+=iChunkSize)
       
  2385 		{
       
  2386 		// don't need page table if a whole PDE mapping is permitted here
       
  2387 		if (aMapShift<iChunkShift || offset || remain<iChunkSize)
       
  2388 			{
       
  2389 			// need to check for a page table at a
       
  2390 			TInt id=PageTableId(a);
       
  2391 			if (id<0)
       
  2392 				{
       
  2393 				// no page table - must allocate one
       
  2394 				id = AllocPageTable();
       
  2395 				if (id<0)
       
  2396 					break;
       
  2397 				// got page table, assign it
       
  2398 				// AssignPageTable(TInt aId, TInt aUsage, TAny* aObject, TLinAddr aAddr, TPde aPdePerm)
       
  2399 				AssignPageTable(id, aAttrib, NULL, a, aPdePerm);
       
  2400 				++newpts;
       
  2401 				}
       
  2402 			}
       
  2403 		remain -= (iChunkSize-offset);
       
  2404 		offset=0;
       
  2405 		}
       
  2406 	if (remain<=0)
       
  2407 		return KErrNone;	// completed OK
       
  2408 
       
  2409 	// ran out of memory somewhere - free page tables which were allocated
       
  2410 	for (; newpts; --newpts)
       
  2411 		{
       
  2412 		a-=iChunkSize;
       
  2413 		TInt id=UnassignPageTable(a);
       
  2414 		FreePageTable(id);
       
  2415 		}
       
  2416 	return KErrNoMemory;
       
  2417 	}
       
  2418 
       
  2419 
       
  2420 /**
       
  2421 Create a hardware chunk object mapping a specified block of physical addresses
       
  2422 with specified access permissions and cache policy.
       
  2423 
       
  2424 When the mapping is no longer required, close the chunk using chunk->Close(0);
       
  2425 Note that closing a chunk does not free any RAM pages which were mapped by the
       
  2426 chunk - these must be freed separately using Epoc::FreePhysicalRam().
       
  2427 
       
  2428 @param	aChunk	Upon successful completion this parameter receives a pointer to
       
  2429 				the newly created chunk. Upon unsuccessful completion it is
       
  2430 				written with a NULL pointer. The virtual address of the mapping
       
  2431 				can subsequently be discovered using the LinearAddress()
       
  2432 				function on the chunk.
       
  2433 @param	aAddr	The base address of the physical region to be mapped. This will
       
  2434 				be rounded down to a multiple of the hardware page size before
       
  2435 				being used.
       
  2436 @param	aSize	The size of the physical address region to be mapped. This will
       
  2437 				be rounded up to a multiple of the hardware page size before
       
  2438 				being used; the rounding is such that the entire range from
       
  2439 				aAddr to aAddr+aSize-1 inclusive is mapped. For example if
       
  2440 				aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an
       
  2441 				8KB range of physical addresses from 0xB0001000 to 0xB0002FFF
       
  2442 				inclusive will be mapped.
       
  2443 @param	aMapAttr Mapping attributes required for the mapping. This is formed
       
  2444 				by ORing together values from the TMappingAttributes enumeration
       
  2445 				to specify the access permissions and caching policy.
       
  2446 
       
  2447 @pre Calling thread must be in a critical section.
       
  2448 @pre Interrupts must be enabled.
       
  2449 @pre Kernel must be unlocked.
       
  2450 @pre No fast mutex can be held.
       
  2451 @pre Call in a thread context.
       
  2452 @pre Can be used in a device driver.
       
  2453 @see TMappingAttributes
       
  2454 */
       
  2455 EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
       
  2456 	{
       
  2457 	if (aAddr == KPhysAddrInvalid)
       
  2458 		return KErrNotSupported;
       
  2459 	return DoNew(aChunk, aAddr, aSize, aMapAttr);
       
  2460 	}
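       // Illustrative driver-side usage sketch for the API documented above. This is only an
       // outline: KMyBufSize is a hypothetical size, and the mapping attributes shown are just
       // one plausible choice; a real driver would pick attributes to suit its hardware.
       //
       //	TPhysAddr phys;
       //	TInt r = Epoc::AllocPhysicalRam(KMyBufSize, phys);	// or use an existing physical region
       //	if (r == KErrNone)
       //		{
       //		DPlatChunkHw* chunk = NULL;
       //		r = DPlatChunkHw::New(chunk, phys, KMyBufSize, EMapAttrSupRw | EMapAttrFullyBlocking);
       //		if (r == KErrNone)
       //			{
       //			TLinAddr lin = chunk->LinearAddress();	// virtual address of the mapping
       //			// ... access the memory via 'lin' ...
       //			chunk->Close(0);						// unmap; does not free the RAM
       //			}
       //		Epoc::FreePhysicalRam(phys, KMyBufSize);	// the RAM must be freed separately
       //		}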
       
  2461 
       
  2462 TInt DPlatChunkHw::DoNew(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
       
  2463 	{
       
  2464 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
       
  2465 	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr));
       
  2466 	if (aSize<=0)
       
  2467 		return KErrArgument;
       
  2468 	MmuBase& m=*MmuBase::TheMmu;
       
  2469 	aChunk=NULL;
       
  2470 	TPhysAddr pa=aAddr!=KPhysAddrInvalid ? aAddr&~m.iPageMask : 0;
       
  2471 	TInt size=((aAddr+aSize+m.iPageMask)&~m.iPageMask)-pa;
       
  2472 	__KTRACE_OPT(KMMU,Kern::Printf("Rounded %08x+%x", pa, size));
       
  2473 	DMemModelChunkHw* pC=new DMemModelChunkHw;
       
  2474 	if (!pC)
       
  2475 		return KErrNoMemory;
       
  2476 	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunkHw created at %08x",pC));
       
  2477 	pC->iPhysAddr=aAddr;
       
  2478 	pC->iSize=size;
       
  2479 	TUint mapattr=aMapAttr;
       
  2480 	TPde pdePerm=0;
       
  2481 	TPte ptePerm=0;
       
  2482 	TInt r=m.PdePtePermissions(mapattr, pdePerm, ptePerm);
       
  2483 	if (r==KErrNone)
       
  2484 		{
       
  2485 		pC->iAllocator=m.MappingRegion(mapattr);
       
  2486 		pC->iAttribs=mapattr;	// save actual mapping attributes
       
  2487 		r=pC->AllocateLinearAddress(pdePerm);
       
  2488 		if (r>=0)
       
  2489 			{
       
  2490 			TInt map_shift=r;
       
  2491 			MmuBase::Wait();
       
  2492 			r=m.AllocateAllPageTables(pC->iLinAddr, size, pdePerm, map_shift, SPageTableInfo::EGlobal);
       
  2493 			if (r==KErrNone && aAddr!=KPhysAddrInvalid)
       
  2494 				m.Map(pC->iLinAddr, pa, size, pdePerm, ptePerm, map_shift);
       
  2495 			MmuBase::Signal();
       
  2496 			}
       
  2497 		}
       
  2498 	if (r==KErrNone)
       
  2499 		aChunk=pC;
       
  2500 	else
       
  2501 		pC->Close(NULL);
       
  2502 	return r;
       
  2503 	}
       
  2504 
       
  2505 TInt DMemModelChunkHw::AllocateLinearAddress(TPde aPdePerm)
       
  2506 	{
       
  2507 	__KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::AllocateLinearAddress(%08x)", aPdePerm));
       
  2508 	__KTRACE_OPT(KMMU, Kern::Printf("iAllocator=%08x iPhysAddr=%08x iSize=%08x", iAllocator, iPhysAddr, iSize));
       
  2509 	MmuBase& m=*MmuBase::TheMmu;
       
  2510 	TInt map_shift = (iPhysAddr<0xffffffffu) ? 30 : m.iPageShift;
       
  2511 	for (; map_shift>=m.iPageShift; --map_shift)
       
  2512 		{
       
  2513 		TUint32 map_size = 1<<map_shift;
       
  2514 		TUint32 map_mask = map_size-1;
       
  2515 		if (!(m.iMapSizes & map_size))
       
  2516 			continue;	// map_size is not supported on this hardware
       
  2517 		TPhysAddr base = (iPhysAddr+map_mask) &~ map_mask;	// base rounded up
       
  2518 		TPhysAddr end = (iPhysAddr+iSize)&~map_mask;		// end rounded down
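       		// The next test is an unsigned trick: (base-end)<0x80000000u is equivalent to
       		// (TInt)(base-end)>=0, i.e. base>=end, which means no whole naturally-aligned block
       		// of map_size fits in the region. Page-size mappings are always attempted.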
       
  2519 		if ((base-end)<0x80000000u && map_shift>m.iPageShift)
       
  2520 			continue;	// region not big enough to use this mapping size
       
  2521 		__KTRACE_OPT(KMMU, Kern::Printf("Try map size %08x", map_size));
       
  2522 		iLinAddr=iAllocator->Alloc(iSize, map_shift, iPhysAddr, aPdePerm);
       
  2523 		if (iLinAddr)
       
  2524 			break;		// done
       
  2525 		}
       
  2526 	TInt r=iLinAddr ? map_shift : KErrNoMemory;
       
  2527 	__KTRACE_OPT(KMMU, Kern::Printf("iLinAddr=%08x, returning %d", iLinAddr, r));
       
  2528 	return r;
       
  2529 	}
       
  2530 
       
  2531 void DMemModelChunkHw::DeallocateLinearAddress()
       
  2532 	{
       
  2533 	__KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::DeallocateLinearAddress %O", this));
       
  2534 	MmuBase& m=*MmuBase::TheMmu;
       
  2535 	MmuBase::WaitHwChunk();
       
  2536 	THwChunkRegion* rgn=iAllocator->Free(iLinAddr, iSize);
       
  2537 	iLinAddr=0;
       
  2538 	MmuBase::SignalHwChunk();
       
  2539 	TLinAddr base = iAllocator->iSection->iBase;
       
  2540 	TBitMapAllocator& section_allocator = iAllocator->iSection->iAllocator;
       
  2541 	while (rgn)
       
  2542 		{
       
  2543 		MmuBase::Wait();
       
  2544 		if (rgn->iRegionSize)
       
  2545 			{
       
  2546 			// free address range
       
  2547 			__KTRACE_OPT(KMMU, Kern::Printf("Freeing range %03x+%03x", rgn->iIndex, rgn->iRegionSize));
       
  2548 			section_allocator.Free(rgn->iIndex, rgn->iRegionSize);
       
  2549 			
       
  2550 			// Though this is a large region, it may still be made up of page tables (not sections).

  2551 			// Check each chunk and remove its page table if necessary.
       
  2552 			TInt i = 0;
       
  2553 			TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
       
  2554 			for (; i<rgn->iRegionSize ; i++,a+=m.iChunkSize)
       
  2555 				{
       
  2556 				TInt id = m.UnassignPageTable(a);
       
  2557 				if (id>=0)
       
  2558 					m.FreePageTable(id);
       
  2559 				}
       
  2560 			}
       
  2561 		else
       
  2562 			{
       
  2563 			// free address and page table if it exists
       
  2564 			__KTRACE_OPT(KMMU, Kern::Printf("Freeing index %03x", rgn->iIndex));
       
  2565 			section_allocator.Free(rgn->iIndex);
       
  2566 			TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
       
  2567 			TInt id = m.UnassignPageTable(a);
       
  2568 			if (id>=0)
       
  2569 				m.FreePageTable(id);
       
  2570 			}
       
  2571 		MmuBase::Signal();
       
  2572 		THwChunkRegion* free=rgn;
       
  2573 		rgn=rgn->iNext;
       
  2574 		Kern::Free(free);
       
  2575 		}
       
  2576 	}
       
  2577 
       
  2578 
       
  2579 //
       
  2580 // RamCacheBase
       
  2581 //
       
  2582 
       
  2583 
       
  2584 RamCacheBase* RamCacheBase::TheRamCache = NULL;
       
  2585 
       
  2586 
       
  2587 RamCacheBase::RamCacheBase()
       
  2588 	{
       
  2589 	}
       
  2590 
       
  2591 
       
  2592 void RamCacheBase::Init2()
       
  2593 	{
       
  2594 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">RamCacheBase::Init2"));
       
  2595 	iMmu = MmuBase::TheMmu;
       
  2596 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<RamCacheBase::Init2"));
       
  2597 	}
       
  2598 
       
  2599 
       
  2600 void RamCacheBase::ReturnToSystem(SPageInfo* aPageInfo)
       
  2601 	{
       
  2602 	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
       
  2603 	__ASSERT_SYSTEM_LOCK;
       
  2604 	aPageInfo->SetUnused();
       
  2605 	--iNumberOfFreePages;
       
  2606 	__NK_ASSERT_DEBUG(iNumberOfFreePages>=0);
       
  2607 	// Release system lock before using the RAM allocator.
       
  2608 	NKern::UnlockSystem();
       
  2609 	iMmu->iRamPageAllocator->FreeRamPage(aPageInfo->PhysAddr(), EPageDiscard);
       
  2610 	NKern::LockSystem();
       
  2611 	}
       
  2612 
       
  2613 
       
  2614 SPageInfo* RamCacheBase::GetPageFromSystem(TUint aBlockedZoneId, TBool aBlockRest)
       
  2615 	{
       
  2616 	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
       
  2617 	SPageInfo* pageInfo;
       
  2618 	TPhysAddr pagePhys;
       
  2619 	TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard, aBlockedZoneId, aBlockRest);
       
  2620 	if(r==KErrNone)
       
  2621 		{
       
  2622 		NKern::LockSystem();
       
  2623 		pageInfo = SPageInfo::FromPhysAddr(pagePhys);
       
  2624 		pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);
       
  2625 		++iNumberOfFreePages;
       
  2626 		NKern::UnlockSystem();
       
  2627 		}
       
  2628 	else
       
  2629 		pageInfo = NULL;
       
  2630 	return pageInfo;
       
  2631 	}
       
  2632 
       
  2633 
       
  2634 //
       
  2635 // RamCache
       
  2636 //
       
  2637 
       
  2638 
       
  2639 void RamCache::Init2()
       
  2640 	{
       
  2641 	__KTRACE_OPT(KBOOT,Kern::Printf(">RamCache::Init2"));
       
  2642 	RamCacheBase::Init2();
       
  2643 	__KTRACE_OPT(KBOOT,Kern::Printf("<RamCache::Init2"));
       
  2644 	}
       
  2645 
       
  2646 
       
  2647 TInt RamCache::Init3()
       
  2648 	{
       
  2649 	return KErrNone;
       
  2650 	}
       
  2651 
       
  2652 void RamCache::RemovePage(SPageInfo& aPageInfo)
       
  2653 	{
       
  2654 	__NK_ASSERT_DEBUG(aPageInfo.Type() == SPageInfo::EPagedCache);
       
  2655 	__NK_ASSERT_DEBUG(aPageInfo.State() == SPageInfo::EStatePagedYoung);
       
  2656 	aPageInfo.iLink.Deque();
       
  2657 	aPageInfo.SetState(SPageInfo::EStatePagedDead);
       
  2658 	}
       
  2659 
       
  2660 TBool RamCache::GetFreePages(TInt aNumPages)
       
  2661 	{
       
  2662 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
       
  2663 	NKern::LockSystem();
       
  2664 
       
  2665 	while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
       
  2666 		{
       
  2667 		// steal a page from cache list and return it to the free pool...
       
  2668 		SPageInfo* pageInfo = SPageInfo::FromLink(iPageList.First()->Deque());
       
  2669 		pageInfo->SetState(SPageInfo::EStatePagedDead);
       
  2670 		SetFree(pageInfo);
       
  2671 		ReturnToSystem(pageInfo);
       
  2672 		--aNumPages;
       
  2673 		}
       
  2674 
       
  2675 	NKern::UnlockSystem();
       
  2676 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
       
  2677 	return !aNumPages;
       
  2678 	}
       
  2679 
       
  2680 
       
  2681 void RamCache::DonateRamCachePage(SPageInfo* aPageInfo)
       
  2682 	{
       
  2683 	SPageInfo::TType type = aPageInfo->Type();
       
  2684 	if(type==SPageInfo::EChunk)
       
  2685 		{
       
  2686 		// Must not donate a locked page. An example is DMA-transferred memory.
       
  2687 		__NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());
       
  2688 
       
  2689 		aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);
       
  2690 		iPageList.Add(&aPageInfo->iLink);
       
  2691 		++iNumberOfFreePages;
       
  2692 		// Update ram allocator counts as this page has changed its type
       
  2693 		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
       
  2694 		iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);
       
  2695 
       
  2696 #ifdef BTRACE_PAGING
       
  2697 		BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkDonatePage, chunk, aPageInfo->Offset());
       
  2698 #endif
       
  2699 		return;
       
  2700 		}
       
  2701 	// allow already donated pages...
       
  2702 	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
       
  2703 	}
       
  2704 
       
  2705 
       
  2706 TBool RamCache::ReclaimRamCachePage(SPageInfo* aPageInfo)
       
  2707 	{
       
  2708 	SPageInfo::TType type = aPageInfo->Type();
       
  2709 //	Kern::Printf("DemandPaging::ReclaimRamCachePage %x %d free=%d",aPageInfo,type,iNumberOfFreePages);
       
  2710 
       
  2711 	if(type==SPageInfo::EChunk)
       
  2712 		return ETrue; // page already reclaimed
       
  2713 
       
  2714 	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
       
  2715 	__NK_ASSERT_DEBUG(aPageInfo->State()==SPageInfo::EStatePagedYoung);
       
  2716 	// Update ram allocator counts as this page has changed its type
       
  2717 	DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
       
  2718 	iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
       
  2719 	aPageInfo->iLink.Deque();
       
  2720 	--iNumberOfFreePages;
       
  2721 	aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);
       
  2722 
       
  2723 #ifdef BTRACE_PAGING
       
  2724 	BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkReclaimPage, chunk, aPageInfo->Offset());
       
  2725 #endif
       
  2726 	return ETrue;
       
  2727 	}
       
  2728 
       
  2729 
       
  2730 /**
       
  2731 Discard the specified page.
       
  2732 Should only be called on a page if a previous call to IsPageDiscardable()
       
  2733 returned ETrue and the system lock hasn't been released between the calls.
       
  2734 
       
  2735 @param aPageInfo The page info of the page to be discarded
       
  2736 @param aBlockedZoneId Not used by this overload.
       
  2737 @param aBlockRest Not used by this overload. 
       
  2738 @return ETrue if the page was successfully discarded.
       
  2739 
       
  2740 @pre System lock held.
       
  2741 @post System lock held.
       
  2742 */
       
  2743 TBool RamCache::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
       
  2744 	{
       
  2745 	__NK_ASSERT_DEBUG(iNumberOfFreePages > 0);
       
  2746 	RemovePage(aPageInfo);
       
  2747 	SetFree(&aPageInfo);
       
  2748 	ReturnToSystem(&aPageInfo);
       
  2749 	return ETrue;
       
  2750 	}
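       // Illustrative caller pattern for the contract described above (a sketch only; 'cache' and
       // 'pageInfo' are hypothetical, the system lock is held across both calls, and the last two
       // arguments are ignored by this overload):
       //
       //	if (cache->IsPageDiscardable(*pageInfo))
       //		cache->DoDiscardPage(*pageInfo, KRamZoneInvalidId, EFalse);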
       
  2751 
       
  2752 
       
  2753 /**
       
  2754 First stage in discarding a list of pages.
       
  2755 
       
  2756 Must ensure that the pages will still be discardable even if system lock is released.
       
  2757 To be used in conjunction with RamCacheBase::DoDiscardPages1().
       
  2758 
       
  2759 @param aPageList A NULL terminated list of the pages to be discarded
       
  2760 @return KErrNone on success.
       
  2761 
       
  2762 @pre System lock held
       
  2763 @post System lock held
       
  2764 */
       
  2765 TInt RamCache::DoDiscardPages0(SPageInfo** aPageList)
       
  2766 	{
       
  2767 	__ASSERT_SYSTEM_LOCK;
       
  2768 
       
  2769 	SPageInfo* pageInfo;
       
  2770 	while((pageInfo = *aPageList++) != 0)
       
  2771 		{
       
  2772 		RemovePage(*pageInfo);
       
  2773 		}
       
  2774 	return KErrNone;
       
  2775 	}
       
  2776 
       
  2777 
       
  2778 /**
       
  2779 Final stage in discarding a list of pages.
       
  2780 Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().
       
  2781 This overload doesn't actually need to do anything.
       
  2782 
       
  2783 @param aPageList A NULL terminated list of the pages to be discarded
       
  2784 @return KErrNone on success.
       
  2785 
       
  2786 @pre System lock held
       
  2787 @post System lock held
       
  2788 */
       
  2789 TInt RamCache::DoDiscardPages1(SPageInfo** aPageList)
       
  2790 	{
       
  2791 	__ASSERT_SYSTEM_LOCK;
       
  2792 	SPageInfo* pageInfo;
       
  2793 	while((pageInfo = *aPageList++) != 0)
       
  2794 		{
       
  2795 		SetFree(pageInfo);
       
  2796 		ReturnToSystem(pageInfo);
       
  2797 		}
       
  2798 	return KErrNone;
       
  2799 	}
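       // Illustrative two-stage usage of DoDiscardPages0()/DoDiscardPages1() (a sketch only;
       // 'cache' and the NULL-terminated 'pages' array are hypothetical):
       //
       //	cache->DoDiscardPages0(pages);	// detach the pages; they must remain discardable
       //	// ... the system lock may be released and re-acquired here ...
       //	cache->DoDiscardPages1(pages);	// free the pages back to the system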
       
  2800 
       
  2801 
       
  2802 /**
       
  2803 Check whether the specified page can be discarded by the RAM cache.
       
  2804 
       
  2805 @param aPageInfo The page info of the page being queried.
       
  2806 @return ETrue when the page can be discarded, EFalse otherwise.
       
  2807 @pre System lock held.
       
  2808 @post System lock held.
       
  2809 */
       
  2810 TBool RamCache::IsPageDiscardable(SPageInfo& aPageInfo)
       
  2811 	{
       
  2812 	SPageInfo::TType type = aPageInfo.Type();
       
  2813 	SPageInfo::TState state = aPageInfo.State();
       
  2814 	return (type == SPageInfo::EPagedCache && state == SPageInfo::EStatePagedYoung);
       
  2815 	}
       
  2816 
       
  2817 
       
  2818 /**
       
  2819 @return ETrue when the unmapped page should be freed, EFalse otherwise
       
  2820 */
       
  2821 TBool RamCache::PageUnmapped(SPageInfo* aPageInfo)
       
  2822 	{
       
  2823 	SPageInfo::TType type = aPageInfo->Type();
       
  2824 //	Kern::Printf("DemandPaging::PageUnmapped %x %d",aPageInfo,type);
       
  2825 	if(type!=SPageInfo::EPagedCache)
       
  2826 		return ETrue;
       
  2827 	SPageInfo::TState state = aPageInfo->State();
       
  2828 	if(state==SPageInfo::EStatePagedYoung)
       
  2829 		{
       
  2830 		// This page will be freed by DChunk::DoDecommit as it was originally allocated,

  2831 		// so update the page counts in the RAM allocator.
       
  2832 		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
       
  2833 		iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
       
  2834 		aPageInfo->iLink.Deque();
       
  2835 		--iNumberOfFreePages;
       
  2836 		}
       
  2837 	return ETrue;
       
  2838 	}
       
  2839 
       
  2840 
       
  2841 void RamCache::Panic(TFault aFault)
       
  2842 	{
       
  2843 	Kern::Fault("RamCache",aFault);
       
  2844 	}
       
  2845 
       
  2846 /**
       
  2847 Flush all cache pages.
       
  2848 
       
  2849 @pre RAM allocator mutex held
       
  2850 @post RAM allocator mutex held
       
  2851 */
       
  2852 void RamCache::FlushAll()
       
  2853 	{
       
  2854 	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
       
  2855 #ifdef _DEBUG
       
  2856 	// Should always succeed
       
  2857 	__NK_ASSERT_DEBUG(GetFreePages(iNumberOfFreePages));
       
  2858 #else
       
  2859 	GetFreePages(iNumberOfFreePages);
       
  2860 #endif
       
  2861 	}
       
  2862 
       
  2863 
       
  2864 //
       
  2865 // Demand Paging
       
  2866 //
       
  2867 
       
  2868 #ifdef __DEMAND_PAGING__
       
  2869 
       
  2870 DemandPaging* DemandPaging::ThePager = 0;
       
  2871 TBool DemandPaging::PseudoRandInitialised = EFalse;
       
  2872 volatile TUint32 DemandPaging::PseudoRandSeed = 0;
       
  2873 
       
  2874 
       
  2875 void M::DemandPagingInit()
       
  2876 	{
       
  2877 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">M::DemandPagingInit"));
       
  2878 	TInt r = RamCacheBase::TheRamCache->Init3();
       
  2879 	if (r != KErrNone)
       
  2880 		DemandPaging::Panic(DemandPaging::EInitialiseFailed);	
       
  2881 
       
  2882 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<M::DemandPagingInit"));
       
  2883 	}
       
  2884 
       
  2885 
       
  2886 TInt M::DemandPagingFault(TAny* aExceptionInfo)
       
  2887 	{
       
  2888 	DemandPaging* pager = DemandPaging::ThePager;
       
  2889 	if(pager)
       
  2890 		return pager->Fault(aExceptionInfo);
       
  2891 	return KErrAbort;
       
  2892 	}
       
  2893 
       
  2894 #ifdef _DEBUG
       
  2895 extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)

  2896 	{

  2897 	if(M::CheckPagingSafe(EFalse, aStartAddress, aLength))
       
  2898 		return;
       
  2899 	Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
       
  2900 	__NK_ASSERT_ALWAYS(0);
       
  2901 	}
       
  2902 
       
  2903 extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)

  2904 	{

  2905 	if(M::CheckPagingSafe(ETrue, aStartAddress, aLength))
       
  2906 		return;
       
  2907 	__KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
       
  2908 	}
       
  2909 #endif
       
  2910 
       
  2911 
       
  2912 TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
       
  2913 	{
       
  2914 	DemandPaging* pager = DemandPaging::ThePager;
       
  2915 	if(!pager || K::Initialising)
       
  2916 		return ETrue;
       
  2917 	
       
  2918 	NThread* nt = NCurrentThread();
       
  2919 	if(!nt)
       
  2920 		return ETrue; // We've not booted properly yet!
       
  2921 
       
  2922 	if (!pager->NeedsMutexOrderCheck(aStartAddr, aLength))
       
  2923 		return ETrue;
       
  2924 
       
  2925 	TBool dataPagingEnabled = EFalse; // data paging not supported on moving or multiple models
       
  2926 
       
  2927 	DThread* thread = _LOFF(nt,DThread,iNThread);
       
  2928 	NFastMutex* fm = NKern::HeldFastMutex();
       
  2929 	if(fm)
       
  2930 		{
       
  2931 		if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
       
  2932 			{
       
  2933 			if (!aDataPaging)
       
  2934 				{
       
  2935 				__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
       
  2936 				return EFalse;
       
  2937 				}
       
  2938 			else
       
  2939 				{
       
  2940 				__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
       
  2941 				return !dataPagingEnabled;
       
  2942 				}
       
  2943 			}
       
  2944 		}
       
  2945 
       
  2946 	DMutex* m = pager->CheckMutexOrder();
       
  2947 	if (m)
       
  2948 		{
       
  2949 		if (!aDataPaging)
       
  2950 			{
       
  2951 			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
       
  2952 			return EFalse;
       
  2953 			}
       
  2954 		else
       
  2955 			{
       
  2956 			__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O",m));
       
  2957 			return !dataPagingEnabled;
       
  2958 			}
       
  2959 		}
       
  2960 	
       
  2961 	return ETrue;
       
  2962 	}
       
  2963 
       
  2964 
       
  2965 TInt M::LockRegion(TLinAddr aStart,TInt aSize)
       
  2966 	{
       
  2967 	DemandPaging* pager = DemandPaging::ThePager;
       
  2968 	if(pager)
       
  2969 		return pager->LockRegion(aStart,aSize,NULL);
       
  2970 	return KErrNone;
       
  2971 	}
       
  2972 
       
  2973 
       
  2974 TInt M::UnlockRegion(TLinAddr aStart,TInt aSize)
       
  2975 	{
       
  2976 	DemandPaging* pager = DemandPaging::ThePager;
       
  2977 	if(pager)
       
  2978 		return pager->UnlockRegion(aStart,aSize,NULL);
       
  2979 	return KErrNone;
       
  2980 	}
       
  2981 
       
  2982 #else // !__DEMAND_PAGING__
       
  2983 
       
  2984 TInt M::LockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
       
  2985 	{
       
  2986 	return KErrNone;
       
  2987 	}
       
  2988 
       
  2989 
       
  2990 TInt M::UnlockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
       
  2991 	{
       
  2992 	return KErrNone;
       
  2993 	}
       
  2994 
       
  2995 #endif // __DEMAND_PAGING__
       
  2996 
       
  2997 
       
  2998 
       
  2999 
       
  3000 //
       
  3001 // DemandPaging
       
  3002 //
       
  3003 
       
  3004 #ifdef __DEMAND_PAGING__
       
  3005 
       
  3006 
       
  3007 const TUint16 KDefaultYoungOldRatio = 3;
       
  3008 const TUint KDefaultMinPages = 256;
       
  3009 const TUint KDefaultMaxPages = KMaxTUint >> KPageShift;
       
  3010 
       
  3011 /*	Need at least 4 mapped pages to guarantee that all ARM instructions can be executed.

  3012 	(The worst case is a THUMB2 STM instruction with both the instruction and its data straddling

  3013 	page boundaries: 2 pages for the instruction plus 2 for the data.)
       
  3014 */
       
  3015 const TUint KMinYoungPages = 4;
       
  3016 const TUint KMinOldPages = 1;
       
  3017 
       
  3018 /*	A minimum young/old ratio of 1 means that we need at least twice KMinYoungPages pages...
       
  3019 */
       
  3020 const TUint KAbsoluteMinPageCount = 2*KMinYoungPages;
       
  3021 
       
  3022 __ASSERT_COMPILE(KMinOldPages<=KAbsoluteMinPageCount/2);
       
  3023 
       
  3024 class DMissingPagingDevice : public DPagingDevice
       
  3025 	{
       
  3026 	TInt Read(TThreadMessage* /*aReq*/,TLinAddr /*aBuffer*/,TUint /*aOffset*/,TUint /*aSize*/,TInt /*aDrvNumber*/)
       
  3027 		{ DemandPaging::Panic(DemandPaging::EDeviceMissing); return 0; }
       
  3028 	};
       
  3029 
       
  3030 
       
  3031 TBool DemandPaging::RomPagingRequested()
       
  3032 	{
       
  3033 	return TheRomHeader().iPageableRomSize != 0;
       
  3034 	}
       
  3035 
       
  3036 
       
  3037 TBool DemandPaging::CodePagingRequested()
       
  3038 	{
       
  3039 	return (TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyDefaultPaged) != EKernelConfigCodePagingPolicyNoPaging;
       
  3040 	}
       
  3041 
       
  3042 
       
  3043 DemandPaging::DemandPaging()
       
  3044 	{
       
  3045 	}
       
  3046 
       
  3047 
       
  3048 void DemandPaging::Init2()
       
  3049 	{
       
  3050 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init2"));
       
  3051 
       
  3052 	RamCacheBase::Init2();
       
  3053 
       
  3054 	// initialise live list...
       
  3055 	SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig;
       
  3056 
       
  3057 	iMinimumPageCount = KDefaultMinPages;
       
  3058 	if(config.iMinPages)
       
  3059 		iMinimumPageCount = config.iMinPages;
       
  3060 	if(iMinimumPageCount<KAbsoluteMinPageCount)
       
  3061 		iMinimumPageCount = KAbsoluteMinPageCount;
       
  3062 	iInitMinimumPageCount = iMinimumPageCount;
       
  3063 
       
  3064 	iMaximumPageCount = KDefaultMaxPages;
       
  3065 	if(config.iMaxPages)
       
  3066 		iMaximumPageCount = config.iMaxPages;
       
  3067 	iInitMaximumPageCount = iMaximumPageCount;
       
  3068 
       
  3069 	iYoungOldRatio = KDefaultYoungOldRatio;
       
  3070 	if(config.iYoungOldRatio)
       
  3071 		iYoungOldRatio = config.iYoungOldRatio;
       
  3072 	TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
       
  3073 	if(iYoungOldRatio>ratioLimit)
       
  3074 		iYoungOldRatio = ratioLimit;
       
  3075 
       
  3076 	iMinimumPageLimit = (KMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio;
       
  3077 	if(iMinimumPageLimit<KAbsoluteMinPageCount)
       
  3078 		iMinimumPageLimit = KAbsoluteMinPageCount;
       
  3079 
       
  3080 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InitialiseLiveList min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));
       
  3081 
       
  3082 	if(iMaximumPageCount<iMinimumPageCount)
       
  3083 		Panic(EInitialiseBadArgs);
       
  3084 
       
  3085 	//
       
  3086 	// This routine doesn't acquire any mutexes because it should be called before the system

  3087 	// is fully up and running, i.e. before another thread can preempt this one.
       
  3088 	//
       
  3089 
       
  3090 	// Calculate page counts
       
  3091 	iOldCount = iMinimumPageCount/(1+iYoungOldRatio);
       
  3092 	if(iOldCount<KMinOldPages)
       
  3093 		Panic(EInitialiseBadArgs);
       
  3094 	iYoungCount = iMinimumPageCount-iOldCount;
       
  3095 	if(iYoungCount<KMinYoungPages)
       
  3096 		Panic(EInitialiseBadArgs); // Need at least 4 pages mapped to execute an ARM LDM instruction in THUMB2 mode
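       	// Worked example with the defaults above (min=256 pages, ratio=3):
       	// iOldCount = 256/(1+3) = 64 and iYoungCount = 256-64 = 192.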
       
  3097 	iNumberOfFreePages = 0;
       
  3098 
       
  3099 	// Allocate RAM pages and put them all on the old list
       
  3100 	iYoungCount = 0;
       
  3101 	iOldCount = 0;
       
  3102 	for(TUint i=0; i<iMinimumPageCount; i++)
       
  3103 		{
       
  3104 		// Allocate a single page
       
  3105 		TPhysAddr pagePhys;
       
  3106 		TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard);
       
  3107 		if(r!=0)
       
  3108 			Panic(EInitialiseFailed);
       
  3109 		AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
       
  3110 		}
       
  3111 
       
  3112 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::Init2"));
       
  3113 	}
       
  3114 
       
  3115 
       
  3116 TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2);
       
  3117 
       
  3118 TInt DemandPaging::Init3()
       
  3119 	{
       
  3120 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init3"));
       
  3121 	TInt r;
       
  3122 
       
  3123 	// construct iBufferChunk
       
  3124 	iDeviceBufferSize = 2*KPageSize;
       
  3125 	TChunkCreateInfo info;
       
  3126 	info.iType = TChunkCreateInfo::ESharedKernelMultiple;
       
  3127 	info.iMaxSize = iDeviceBufferSize*KMaxPagingDevices;
       
  3128 	info.iMapAttr = EMapAttrCachedMax;
       
  3129 	info.iOwnsMemory = ETrue;
       
  3130 	TUint32 mapAttr;
       
  3131 	r = Kern::ChunkCreate(info,iDeviceBuffersChunk,iDeviceBuffers,mapAttr);
       
  3132 	if(r!=KErrNone)
       
  3133 		return r;
       
  3134 
       
  3135 	// Install 'null' paging devices which panic if used...
       
  3136 	DMissingPagingDevice* missingPagingDevice = new DMissingPagingDevice;
       
  3137 	for(TInt i=0; i<KMaxPagingDevices; i++)
       
  3138 		{
       
  3139 		iPagingDevices[i].iInstalled = EFalse;
       
  3140 		iPagingDevices[i].iDevice = missingPagingDevice;
       
  3141 		}
       
  3142 
       
  3143 	// Initialise ROM info...
       
  3144 	const TRomHeader& romHeader = TheRomHeader();
       
  3145 	iRomLinearBase = (TLinAddr)&romHeader;
       
  3146 	iRomSize = iMmu->RoundToPageSize(romHeader.iUncompressedSize);
       
  3147 	if(romHeader.iRomPageIndex)
       
  3148 		iRomPageIndex = (SRomPageInfo*)((TInt)&romHeader+romHeader.iRomPageIndex);
       
  3149 
       
  3150 	TLinAddr pagedStart = romHeader.iPageableRomSize ? (TLinAddr)&romHeader+romHeader.iPageableRomStart : 0;
       
  3151 	if(pagedStart)
       
  3152 		{
       
  3153 		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("ROM=%x+%x PagedStart=%x",iRomLinearBase,iRomSize,pagedStart));
       
  3154 		__NK_ASSERT_ALWAYS(TUint(pagedStart-iRomLinearBase)<TUint(iRomSize));
       
  3155 		iRomPagedLinearBase = pagedStart;
       
  3156 		iRomPagedSize = iRomLinearBase+iRomSize-pagedStart;
       
  3157 		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::Init3, ROM Paged start(0x%x), size(0x%x)",iRomPagedLinearBase,iRomPagedSize));
       
  3158 
       
  3159 #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
       
  3160 		// Get physical addresses of ROM pages
       
  3161 		iOriginalRomPageCount = iMmu->RoundToPageSize(iRomSize)>>KPageShift;
       
  3162 		iOriginalRomPages = new TPhysAddr[iOriginalRomPageCount];
       
  3163 		__NK_ASSERT_ALWAYS(iOriginalRomPages);
       
  3164 		TPhysAddr romPhysAddress; 
       
  3165 		iMmu->LinearToPhysical(iRomLinearBase,iRomSize,romPhysAddress,iOriginalRomPages);
       
  3166 #endif
       
  3167 		}
       
  3168 
       
  3169 	r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
       
  3170 	__NK_ASSERT_ALWAYS(r==KErrNone);
       
  3171 
       
  3172 #ifdef __DEMAND_PAGING_BENCHMARKS__
       
  3173 	for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
       
  3174 		ResetBenchmarkData((TPagingBenchmark)i);
       
  3175 #endif
       
  3176 
       
  3177 	// Initialisation now complete
       
  3178 	ThePager = this;
       
  3179 	return KErrNone;
       
  3180 	}
       
  3181 
       
  3182 
       
  3183 DemandPaging::~DemandPaging()
       
  3184 	{
       
  3185 #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
       
  3186 	delete[] iOriginalRomPages;
       
  3187 #endif
       
  3188 	for (TUint i = 0 ; i < iPagingRequestCount ; ++i)
       
  3189 		delete iPagingRequests[i];
       
  3190 	}
       
  3191 
       
  3192 
       
  3193 TInt DemandPaging::InstallPagingDevice(DPagingDevice* aDevice)
       
  3194 	{
       
  3195 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InstallPagingDevice name='%s' type=%d",aDevice->iName,aDevice->iType));
       
  3196 
       
  3197 	if(aDevice->iReadUnitShift>KPageShift)
       
  3198 		Panic(EInvalidPagingDevice);
       
  3199 
       
  3200 	TInt i;
       
  3201 	TInt r = KErrNone;
       
  3202 	TBool createRequestObjects = EFalse;
       
  3203 	
       
  3204 	if ((aDevice->iType & DPagingDevice::ERom) && RomPagingRequested())
       
  3205 		{
       
  3206 		r = DoInstallPagingDevice(aDevice, 0);
       
  3207 		if (r != KErrNone)
       
  3208 			goto done;
       
  3209 		K::MemModelAttributes|=EMemModelAttrRomPaging;
       
  3210 		createRequestObjects = ETrue;
       
  3211 		}
       
  3212 	
       
  3213 	if ((aDevice->iType & DPagingDevice::ECode) && CodePagingRequested())
       
  3214 		{
       
  3215 		for (i = 0 ; i < KMaxLocalDrives ; ++i)
       
  3216 			{
       
  3217 			if (aDevice->iDrivesSupported & (1<<i))
       
  3218 				{
       
  3219 				r = DoInstallPagingDevice(aDevice, i + 1);
       
  3220 				if (r != KErrNone)
       
  3221 					goto done;
       
  3222 				}
       
  3223 			}
       
  3224 		K::MemModelAttributes|=EMemModelAttrCodePaging;
       
  3225 		createRequestObjects = ETrue;
       
  3226 		}
       
  3227 
       
  3228 	if (createRequestObjects)
       
  3229 		{
       
  3230 		for (i = 0 ; i < KPagingRequestsPerDevice ; ++i)
       
  3231 			{
       
  3232 			r = CreateRequestObject();
       
  3233 			if (r != KErrNone)
       
  3234 				goto done;
       
  3235 			}
       
  3236 		}
       
  3237 	
       
  3238 done:	
       
  3239 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::InstallPagingDevice returns %d",r));
       
  3240 	return r;
       
  3241 	}
       
  3242 
       
  3243 TInt DemandPaging::DoInstallPagingDevice(DPagingDevice* aDevice, TInt aId)
       
  3244 	{
       
  3245 	NKern::LockSystem();
       
  3246 	SPagingDevice* device = &iPagingDevices[aId];
       
  3247 	if(device->iInstalled)
       
  3248 		{
       
  3249 		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one ROM paging device !!!!!!!! ****"));
       
  3250 		//Panic(EDeviceAlreadyExists);
       
  3251 		NKern::UnlockSystem();
       
  3252 		return KErrNone;
       
  3253 		}	
       
  3254 	
       
  3255 	aDevice->iDeviceId = aId;
       
  3256 	device->iDevice = aDevice;
       
  3257 	device->iInstalled = ETrue;
       
  3258 	NKern::UnlockSystem();
       
  3259 	
       
  3260 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::InstallPagingDevice id=%d, device=%08x",aId,device));
       
  3261 	
       
  3262 	return KErrNone;
       
  3263 	}
       
  3264 
       
  3265 DemandPaging::DPagingRequest::~DPagingRequest()
       
  3266 	{
       
  3267 	if (iMutex)
       
  3268 		iMutex->Close(NULL);
       
  3269 	}
       
  3270 
       
  3271 TInt DemandPaging::CreateRequestObject()
       
  3272 	{
       
  3273 	_LIT(KLitPagingRequest,"PagingRequest-"); 
       
  3274 
       
  3275 	TInt index;
       
  3276 	TInt id = (TInt)__e32_atomic_add_ord32(&iNextPagingRequestCount, 1);
       
  3277 	TLinAddr offset = id * iDeviceBufferSize;
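       	// each request object owns a private iDeviceBufferSize slice of the shared buffer chunk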
       
  3278 	TUint32 physAddr = 0;
       
  3279 	TInt r = Kern::ChunkCommitContiguous(iDeviceBuffersChunk,offset,iDeviceBufferSize, physAddr);
       
  3280 	if(r != KErrNone)
       
  3281 		return r;
       
  3282 
       
  3283 	DPagingRequest* req = new DPagingRequest();
       
  3284 	if (!req)
       
  3285 		return KErrNoMemory;
       
  3286 
       
  3287 	req->iBuffer = iDeviceBuffers + offset;
       
  3288 	AllocLoadAddress(*req, id);
       
  3289 		
       
  3290 	TBuf<16> mutexName(KLitPagingRequest);
       
  3291 	mutexName.AppendNum(id);
       
  3292 	r = K::MutexCreate(req->iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
       
  3293 	if (r!=KErrNone)
       
  3294 		goto done;
       
  3295 
       
  3296 	// Ensure there are enough young pages to cope with new request object
       
  3297 	r = ResizeLiveList(iMinimumPageCount, iMaximumPageCount);
       
  3298 	if (r!=KErrNone)
       
  3299 		goto done;
       
  3300 
       
  3301 	NKern::LockSystem();
       
  3302 	index = iPagingRequestCount++;
       
  3303 	__NK_ASSERT_ALWAYS(index < KMaxPagingRequests);
       
  3304 	iPagingRequests[index] = req;
       
  3305 	iFreeRequestPool.AddHead(req);
       
  3306 	NKern::UnlockSystem();
       
  3307 
       
  3308 done:
       
  3309 	if (r != KErrNone)
       
  3310 		delete req;
       
  3311 	
       
  3312 	return r;
       
  3313 	}
       
  3314 
       
  3315 DemandPaging::DPagingRequest* DemandPaging::AcquireRequestObject()
       
  3316 	{
       
  3317 	__ASSERT_SYSTEM_LOCK;	
       
  3318 	__NK_ASSERT_DEBUG(iPagingRequestCount > 0);
       
  3319 	
       
  3320 	DPagingRequest* req = NULL;
       
  3321 
       
  3322 	// System lock used to serialise access to our data structures as we have to hold it anyway when
       
  3323 	// we wait on the mutex
       
  3324 
       
  3325 	req = (DPagingRequest*)iFreeRequestPool.GetFirst();
       
  3326 	if (req != NULL)
       
  3327 		__NK_ASSERT_DEBUG(req->iUsageCount == 0);
       
  3328 	else
       
  3329 		{
       
  3330 		// Pick a random request object to wait on
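       		// (the 64-bit multiply and >>32 scale the 32-bit pseudo-random value onto [0, iPagingRequestCount) without a modulo)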
       
  3331 		TUint index = (FastPseudoRand() * TUint64(iPagingRequestCount)) >> 32;
       
  3332 		__NK_ASSERT_DEBUG(index < iPagingRequestCount);
       
  3333 		req = iPagingRequests[index];
       
  3334 		__NK_ASSERT_DEBUG(req->iUsageCount > 0);
       
  3335 		}
       
  3336 	
       
  3337 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
       
  3338 	++iWaitingCount;
       
  3339 	if (iWaitingCount > iMaxWaitingCount)
       
  3340 		iMaxWaitingCount = iWaitingCount;
       
  3341 #endif
       
  3342 
       
  3343 	++req->iUsageCount;
       
  3344 	TInt r = req->iMutex->Wait();
       
  3345 	__NK_ASSERT_ALWAYS(r == KErrNone);
       
  3346 
       
  3347 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
       
  3348 	--iWaitingCount;
       
  3349 	++iPagingCount;
       
  3350 	if (iPagingCount > iMaxPagingCount)
       
  3351 		iMaxPagingCount = iPagingCount;
       
  3352 #endif
       
  3353 
       
  3354 	return req;
       
  3355 	}
       
  3356 
       
  3357 void DemandPaging::ReleaseRequestObject(DPagingRequest* aReq)
       
  3358 	{
       
  3359 	__ASSERT_SYSTEM_LOCK;
       
  3360 
       
  3361 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
       
  3362 	--iPagingCount;
       
  3363 #endif
       
  3364 
       
  3365 	// If there are no threads waiting on the mutex then return it to the free pool
       
  3366 	__NK_ASSERT_DEBUG(aReq->iUsageCount > 0);
       
  3367 	if (--aReq->iUsageCount == 0)
       
  3368 		iFreeRequestPool.AddHead(aReq);
       
  3369 
       
  3370 	aReq->iMutex->Signal();
       
  3371 	NKern::LockSystem();
       
  3372 	}
       
  3373 
       
  3374 TInt DemandPaging::ReadRomPage(const DPagingRequest* aReq, TLinAddr aRomAddress)
       
  3375 	{
       
  3376 	START_PAGING_BENCHMARK;
       
  3377 
       
  3378 	TInt pageSize = KPageSize;
       
  3379 	TInt dataOffset = aRomAddress-iRomLinearBase;
       
  3380 	TInt pageNumber = dataOffset>>KPageShift;
       
  3381 	TInt readUnitShift = RomPagingDevice().iDevice->iReadUnitShift;
       
  3382 	TInt r;
       
  3383 	if(!iRomPageIndex)
       
  3384 		{
       
  3385 		// ROM not broken into pages, so just read it in directly
       
  3386 		START_PAGING_BENCHMARK;
       
  3387 		r = RomPagingDevice().iDevice->Read(const_cast<TThreadMessage*>(&aReq->iMessage),aReq->iLoadAddr,dataOffset>>readUnitShift,pageSize>>readUnitShift,-1/*token for ROM paging*/);
       
  3388 		END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
       
  3389 		}
       
  3390 	else
       
  3391 		{
       
  3392 		// Work out where data for page is located
       
  3393 		SRomPageInfo* romPageInfo = iRomPageIndex+pageNumber;
       
  3394 		dataOffset = romPageInfo->iDataStart;
       
  3395 		TInt dataSize = romPageInfo->iDataSize;
       
  3396 		if(!dataSize)
       
  3397 			{
       
  3398 			// empty page, fill it with 0xff...
       
  3399 			memset((void*)aReq->iLoadAddr,-1,pageSize);
       
  3400 			r = KErrNone;
       
  3401 			}
       
  3402 		else
       
  3403 			{
       
  3404 			__NK_ASSERT_ALWAYS(romPageInfo->iPagingAttributes&SRomPageInfo::EPageable);
       
  3405 
       
  3406 			// Read data for page...
       
  3407 			TThreadMessage* msg= const_cast<TThreadMessage*>(&aReq->iMessage);
       
  3408 			TLinAddr buffer = aReq->iBuffer;
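       			// Round the byte range [dataOffset, dataOffset+dataSize) out to whole read units;
       			// e.g. with 512-byte read units (readUnitShift==9) a 700-byte span needs 2 or 3 units
       			// depending on its alignment.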
       
  3409 			TUint readStart = dataOffset>>readUnitShift;
       
  3410 			TUint readSize = ((dataOffset+dataSize-1)>>readUnitShift)-readStart+1;
       
  3411 			__NK_ASSERT_DEBUG((readSize<<readUnitShift)<=iDeviceBufferSize);
       
  3412 			START_PAGING_BENCHMARK;
       
  3413 			r = RomPagingDevice().iDevice->Read(msg,buffer,readStart,readSize,-1/*token for ROM paging*/);
       
  3414 			END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
       
  3415 			if(r==KErrNone)
       
  3416 				{
       
  3417 				// Decompress data...
       
  3418 				TLinAddr data = buffer+dataOffset-(readStart<<readUnitShift);
       
  3419 				r = Decompress(romPageInfo->iCompressionType,aReq->iLoadAddr,data,dataSize);
       
  3420 				if(r>=0)
       
  3421 					{
       
  3422 					__NK_ASSERT_ALWAYS(r==pageSize);
       
  3423 					r = KErrNone;
       
  3424 					}
       
  3425 				}
       
  3426 			}
       
  3427 		}
       
  3428 
       
  3429 	END_PAGING_BENCHMARK(this, EPagingBmReadRomPage);
       
  3430 	return r;
       
  3431 	}
       
  3432 
       
  3433 TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount)
       
  3434 	{
       
  3435 	START_PAGING_BENCHMARK;
       
  3436 	TInt drive = (TInt)aArg1;
       
  3437 	TThreadMessage* msg= (TThreadMessage*)aArg2;
       
  3438 	DemandPaging::SPagingDevice& device = DemandPaging::ThePager->CodePagingDevice(drive);
       
  3439 	TInt r = device.iDevice->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive);
       
  3440 	END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
       
  3441 	return r;
       
  3442 	}
       
  3443 
       
  3444 TInt DemandPaging::ReadCodePage(const DPagingRequest* aReq, DMmuCodeSegMemory* aCodeSegMemory, TLinAddr aCodeAddress)
       
  3445 	{
       
  3446 	__KTRACE_OPT(KPAGING,Kern::Printf("ReadCodePage buffer = %08x, csm == %08x, addr == %08x", aReq->iLoadAddr, aCodeSegMemory, aCodeAddress));
       
  3447 	
       
  3448 	START_PAGING_BENCHMARK;
       
  3449 
       
  3450 	// Get the paging device for this drive
       
  3451 	SPagingDevice& device = CodePagingDevice(aCodeSegMemory->iCodeLocalDrive);
       
  3452 
       
  3453 	// Work out which bit of the file to read
       
  3454 	SRamCodeInfo& ri = aCodeSegMemory->iRamInfo;
       
  3455 	TInt codeOffset = aCodeAddress - ri.iCodeRunAddr;
       
  3456 	TInt pageNumber = codeOffset >> KPageShift;
       
  3457 	TBool compressed = aCodeSegMemory->iCompressionType != SRomPageInfo::ENoCompression;
       
  3458 	TInt dataOffset, dataSize;
       
  3459 	if (compressed)
       
  3460 		{
       
  3461 		dataOffset = aCodeSegMemory->iCodePageOffsets[pageNumber];
       
  3462 		dataSize = aCodeSegMemory->iCodePageOffsets[pageNumber + 1] - dataOffset;
       
  3463 		__KTRACE_OPT(KPAGING,Kern::Printf("  compressed, file offset == %x, size == %d", dataOffset, dataSize));
       
  3464 		}
       
  3465 	else
       
  3466 		{
       
  3467 		dataOffset = codeOffset + aCodeSegMemory->iCodeStartInFile;
       
  3468 		dataSize = Min(KPageSize, aCodeSegMemory->iBlockMap.DataLength() - dataOffset);
       
  3469 		__NK_ASSERT_DEBUG(dataSize >= 0);
       
  3470 		__KTRACE_OPT(KPAGING,Kern::Printf("  uncompressed, file offset == %x, size == %d", dataOffset, dataSize));
       
  3471 		}
       
  3472 
       
  3473 	TInt bufferStart = aCodeSegMemory->iBlockMap.Read(aReq->iBuffer,
       
  3474 												dataOffset,
       
  3475 												dataSize,
       
  3476 												device.iDevice->iReadUnitShift,
       
  3477 												ReadFunc,
       
  3478 												(TAny*)aCodeSegMemory->iCodeLocalDrive,
       
  3479 												(TAny*)&aReq->iMessage);
       
  3480 	
       
  3481 
       
  3482 	TInt r = KErrNone;
       
  3483 	if(bufferStart<0)
       
  3484 		{
       
  3485 		r = bufferStart; // return error
       
  3486 		__NK_ASSERT_DEBUG(0);
       
  3487 		}
       
  3488 	else
       
  3489 		{
       
  3490 		TLinAddr data = aReq->iBuffer + bufferStart;
       
  3491 		if (compressed)
       
  3492 			{
       
  3493 			r = Decompress(aCodeSegMemory->iCompressionType, aReq->iLoadAddr, data, dataSize);
       
  3494 			if(r>=0)
       
  3495 				{
       
  3496 				dataSize = Min(KPageSize, ri.iCodeSize - codeOffset);
       
  3497 				if(r!=dataSize)
       
  3498 					{
       
  3499 					__NK_ASSERT_DEBUG(0);
       
  3500 					r = KErrCorrupt;
       
  3501 					}
       
  3502 				else
       
  3503 					r = KErrNone;
       
  3504 				}
       
  3505 			else
       
  3506 				{
       
  3507 				__NK_ASSERT_DEBUG(0);
       
  3508 				}
       
  3509 			}
       
  3510 		else
       
  3511 			{
       
  3512 			#ifdef BTRACE_PAGING_VERBOSE
       
  3513 			BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,SRomPageInfo::ENoCompression);
       
  3514 			#endif
       
  3515 			memcpy((TAny*)aReq->iLoadAddr, (TAny*)data, dataSize);
       
  3516 			#ifdef BTRACE_PAGING_VERBOSE
       
  3517 			BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
       
  3518 			#endif
       
  3519 			}
       
  3520 		}
       
  3521 
       
  3522 	if(r==KErrNone)
       
  3523 		if (dataSize < KPageSize)
       
  3524 			memset((TAny*)(aReq->iLoadAddr + dataSize), 0x03, KPageSize - dataSize);	// pad the rest of the page with 0x03
       
  3525 
       
  3526 	END_PAGING_BENCHMARK(this, EPagingBmReadCodePage);
       
  3527 	
       
  3528 	return r;
       
  3529 	}
       
  3530 
       
  3531 
       
  3532 #include "decompress.h"
       
  3533 
       
  3534 	
       
  3535 TInt DemandPaging::Decompress(TInt aCompressionType,TLinAddr aDst,TLinAddr aSrc,TUint aSrcSize)
       
  3536 	{
       
  3537 #ifdef BTRACE_PAGING_VERBOSE
       
  3538 	BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,aCompressionType);
       
  3539 #endif
       
  3540 	TInt r;
       
  3541 	switch(aCompressionType)
       
  3542 		{
       
  3543 	case SRomPageInfo::ENoCompression:
       
  3544 		memcpy((void*)aDst,(void*)aSrc,aSrcSize);
       
  3545 		r = aSrcSize;
       
  3546 		break;
       
  3547 
       
  3548 	case SRomPageInfo::EBytePair:
       
  3549 		{
       
  3550 		START_PAGING_BENCHMARK;
       
  3551 		TUint8* srcNext=0;
       
  3552 		r=BytePairDecompress((TUint8*)aDst,KPageSize,(TUint8*)aSrc,aSrcSize,srcNext);
       
  3553 		if (r == KErrNone)
       
  3554 			__NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcSize);
       
  3555 		END_PAGING_BENCHMARK(this, EPagingBmDecompress);
       
  3556 		}
       
  3557 		break;
       
  3558 
       
  3559 	default:
       
  3560 		r = KErrNotSupported;
       
  3561 		break;
       
  3562 		}
       
  3563 #ifdef BTRACE_PAGING_VERBOSE
       
  3564 	BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
       
  3565 #endif
       
  3566 	return r;
       
  3567 	}
       
  3568 
       
  3569 
       
  3570 void DemandPaging::BalanceAges()
       
  3571 	{
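       	// Keep the live list balanced: the young list may hold at most iYoungOldRatio times
       	// as many pages as the old list; if it has grown beyond that, demote one young page.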
       
  3572 	if(iOldCount*iYoungOldRatio>=iYoungCount)
       
  3573 		return; // We have enough old pages
       
  3574 
       
  3575 	// make one young page into an old page...
       
  3576 
       
  3577 	__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
       
  3578 	__NK_ASSERT_DEBUG(iYoungCount);
       
  3579 	SDblQueLink* link = iYoungList.Last()->Deque();
       
  3580 	--iYoungCount;
       
  3581 
       
  3582 	SPageInfo* pageInfo = SPageInfo::FromLink(link);
       
  3583 	pageInfo->SetState(SPageInfo::EStatePagedOld);
       
  3584 
       
  3585 	iOldList.AddHead(link);
       
  3586 	++iOldCount;
       
  3587 
       
  3588 	SetOld(pageInfo);
       
  3589 
       
  3590 #ifdef BTRACE_PAGING_VERBOSE
       
  3591 	BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,pageInfo->PhysAddr());
       
  3592 #endif
       
  3593 	}
       
  3594 
       
  3595 
       
  3596 void DemandPaging::AddAsYoungest(SPageInfo* aPageInfo)
       
  3597 	{
       
  3598 #ifdef _DEBUG
       
  3599 	SPageInfo::TType type = aPageInfo->Type();
       
  3600 	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode || type==SPageInfo::EPagedData || type==SPageInfo::EPagedCache);
       
  3601 #endif
       
  3602 	aPageInfo->SetState(SPageInfo::EStatePagedYoung);
       
  3603 	iYoungList.AddHead(&aPageInfo->iLink);
       
  3604 	++iYoungCount;
       
  3605 	}
       
  3606 
       
  3607 
       
  3608 void DemandPaging::AddAsFreePage(SPageInfo* aPageInfo)
       
  3609 	{
       
  3610 #ifdef BTRACE_PAGING
       
  3611 	TPhysAddr phys = aPageInfo->PhysAddr();
       
  3612 	BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,phys);
       
  3613 #endif
       
  3614 	aPageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedOld);
       
  3615 	iOldList.Add(&aPageInfo->iLink);
       
  3616 	++iOldCount;
       
  3617 	}
       
  3618 
       
  3619 
       
  3620 void DemandPaging::RemovePage(SPageInfo* aPageInfo)
       
  3621 	{
       
  3622 	switch(aPageInfo->State())
       
  3623 		{
       
  3624 	case SPageInfo::EStatePagedYoung:
       
  3625 		__NK_ASSERT_DEBUG(iYoungCount);
       
  3626 		aPageInfo->iLink.Deque();
       
  3627 		--iYoungCount;
       
  3628 		break;
       
  3629 
       
  3630 	case SPageInfo::EStatePagedOld:
       
  3631 		__NK_ASSERT_DEBUG(iOldCount);
       
  3632 		aPageInfo->iLink.Deque();
       
  3633 		--iOldCount;
       
  3634 		break;
       
  3635 
       
  3636 	case SPageInfo::EStatePagedLocked:
       
  3637 		break;
       
  3638 
       
  3639 	default:
       
  3640 		__NK_ASSERT_DEBUG(0);
       
  3641 		}
       
  3642 	aPageInfo->SetState(SPageInfo::EStatePagedDead);
       
  3643 	}
       
  3644 
       
  3645 
       
  3646 SPageInfo* DemandPaging::GetOldestPage()
       
  3647 	{
       
  3648 	// remove oldest from list...
       
  3649 	SDblQueLink* link;
       
  3650 	if(iOldCount)
       
  3651 		{
       
  3652 		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
       
  3653 		link = iOldList.Last()->Deque();
       
  3654 		--iOldCount;
       
  3655 		}
       
  3656 	else
       
  3657 		{
       
  3658 		__NK_ASSERT_DEBUG(iYoungCount);
       
  3659 		__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
       
  3660 		link = iYoungList.Last()->Deque();
       
  3661 		--iYoungCount;
       
  3662 		}
       
  3663 	SPageInfo* pageInfo = SPageInfo::FromLink(link);
       
  3664 	pageInfo->SetState(SPageInfo::EStatePagedDead);
       
  3665 
       
  3666 	// put page in a free state...
       
  3667 	SetFree(pageInfo);
       
  3668 	pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);
       
  3669 
       
  3670 	// keep live list balanced...
       
  3671 	BalanceAges();
       
  3672 
       
  3673 	return pageInfo;
       
  3674 	}
       
  3675 
       
  3676 
       
  3677 TBool DemandPaging::GetFreePages(TInt aNumPages)
       
  3678 	{
       
  3679 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
       
  3680 	NKern::LockSystem();
       
  3681 
       
  3682 	while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
       
  3683 		{
       
  3684 		// steal a page from live page list and return it to the free pool...
       
  3685 		ReturnToSystem(GetOldestPage());
       
  3686 		--aNumPages;
       
  3687 		}
       
  3688 
       
  3689 	NKern::UnlockSystem();
       
  3690 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
       
  3691 	return !aNumPages;
       
  3692 	}
       
  3693 
       
  3694 
       
  3695 void DemandPaging::DonateRamCachePage(SPageInfo* aPageInfo)
       
  3696 	{
       
  3697 	__NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
       
  3698 	SPageInfo::TType type = aPageInfo->Type();
       
  3699 	if(type==SPageInfo::EChunk)
       
  3700 		{
       
  3701 		// Must not donate a locked page. An example is DMA-transferred memory.
       
  3702 		__NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());
       
  3703 		
       
  3704 		aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);
       
  3705 
       
  3706 		// Update ram allocator counts as this page has changed its type
       
  3707 		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
       
  3708 		iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);
       
  3709 
       
  3710 		AddAsYoungest(aPageInfo);
       
  3711 		++iNumberOfFreePages;
       
  3712 		if (iMinimumPageCount + iNumberOfFreePages > iMaximumPageCount)
       
  3713 			ReturnToSystem(GetOldestPage());
       
  3714 		BalanceAges();
       
  3715 		return;
       
  3716 		}
       
  3717 	// allow already donated pages...
       
  3718 	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
       
  3719 	}
       
  3720 
       
  3721 
       
  3722 TBool DemandPaging::ReclaimRamCachePage(SPageInfo* aPageInfo)
       
  3723 	{
       
  3724 	SPageInfo::TType type = aPageInfo->Type();
       
  3725 	if(type==SPageInfo::EChunk)
       
  3726 		return ETrue; // page already reclaimed
       
  3727 
       
  3728 	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
       
  3729 
       
  3730 	if(!iNumberOfFreePages)
       
  3731 		return EFalse;
       
  3732 	--iNumberOfFreePages;
       
  3733 
       
  3734 	RemovePage(aPageInfo);
       
  3735 	aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);
       
  3736 
       
  3737 	// Update ram allocator counts as this page has changed its type
       
  3738 	DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
       
  3739 	iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
       
  3740 	return ETrue;
       
  3741 	}
       
  3742 
       
  3743 
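       	// Obtain a page for servicing a paging fault. Order of preference: reuse a
       	// free page already sitting at the tail of the old list, then grow the cache
       	// from system RAM while below iMaximumPageCount, and only as a last resort
       	// evict the oldest live page.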
       
  3744 SPageInfo* DemandPaging::AllocateNewPage()
       
  3745 	{
       
  3746 	__ASSERT_SYSTEM_LOCK
       
  3747 	SPageInfo* pageInfo;
       
  3748 
       
  3749 	NKern::UnlockSystem();
       
  3750 	MmuBase::Wait();
       
  3751 	NKern::LockSystem();
       
  3752 
       
  3753 	// Try getting a free page from our active page list
       
  3754 	if(iOldCount)
       
  3755 		{
       
  3756 		pageInfo = SPageInfo::FromLink(iOldList.Last());
       
  3757 		if(pageInfo->Type()==SPageInfo::EPagedFree)
       
  3758 			{
       
  3759 			pageInfo = GetOldestPage();
       
  3760 			goto done;
       
  3761 			}
       
  3762 		}
       
  3763 
       
  3764 	// Try getting a free page from the system pool
       
  3765 	if(iMinimumPageCount+iNumberOfFreePages<iMaximumPageCount)
       
  3766 		{
       
  3767 		NKern::UnlockSystem();
       
  3768 		pageInfo = GetPageFromSystem();
       
  3769 		NKern::LockSystem();
       
  3770 		if(pageInfo)
       
  3771 			goto done;
       
  3772 		}
       
  3773 
       
  3774 	// As a last resort, steal one from our list of active pages
       
  3775 	pageInfo = GetOldestPage();
       
  3776 
       
  3777 done:
       
  3778 	NKern::UnlockSystem();
       
  3779 	MmuBase::Signal();
       
  3780 	NKern::LockSystem();
       
  3781 	return pageInfo;
       
  3782 	}
       
  3783 
       
  3784 
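       	// Promote a page that has just been accessed: old pages move back to the head
       	// of the young list (and the lists are rebalanced), young pages simply become
       	// the youngest again, and locked pages are left untouched.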
       
  3785 void DemandPaging::Rejuvenate(SPageInfo* aPageInfo)
       
  3786 	{
       
  3787 	SPageInfo::TState state = aPageInfo->State();
       
  3788 	if(state==SPageInfo::EStatePagedOld)
       
  3789 		{
       
  3790 		// move page from old list to head of young list...
       
  3791 		__NK_ASSERT_DEBUG(iOldCount);
       
  3792 		aPageInfo->iLink.Deque();
       
  3793 		--iOldCount;
       
  3794 		AddAsYoungest(aPageInfo);
       
  3795 		BalanceAges();
       
  3796 		}
       
  3797 	else if(state==SPageInfo::EStatePagedYoung)
       
  3798 		{
       
  3799 		// page was already young, move it to the start of the list (make it the youngest)
       
  3800 		aPageInfo->iLink.Deque();
       
  3801 		iYoungList.AddHead(&aPageInfo->iLink);
       
  3802 		}
       
  3803 	else
       
  3804 		{
       
  3805 		// leave locked pages alone
       
  3806 		__NK_ASSERT_DEBUG(state==SPageInfo::EStatePagedLocked);
       
  3807 		}
       
  3808 	}
       
  3809 
       
  3810 
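       	// A realtime thread has taken a paging fault. Decide whether the fault should
       	// be blamed on an IPC client (remote address) or on the current thread, and
       	// return KErrAbort unless the offending thread is in the 'warning only' state,
       	// in which case paging is allowed to proceed.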
       
  3811 TInt DemandPaging::CheckRealtimeThreadFault(DThread* aThread, TAny* aContext)
       
  3812 	{
       
  3813 	TInt r = KErrNone;
       
  3814 	DThread* client = aThread->iIpcClient;
       
  3815 	
       
  3816 	// If iIpcClient is set then we are accessing the address space of a remote thread.  If we are
       
  3817 	// in an IPC trap, this will contain information about the local and remote addresses being accessed.
       
  3818 	// If this is not set then we assume that any fault must be the fault of a bad remote address.
       
  3819 	TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap;
       
  3820 	if (ipcTrap && !ipcTrap->IsTIpcExcTrap())
       
  3821 		ipcTrap = 0;
       
  3822 	if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aContext) == TIpcExcTrap::EExcRemote))
       
  3823 		{
       
  3824 		// Kill client thread...
       
  3825 		NKern::UnlockSystem();
       
  3826 		if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)"))
       
  3827 			{
       
  3828 			// Treat memory access as bad...
       
  3829 			r = KErrAbort;
       
  3830 			}
       
  3831 		// else thread is in 'warning only' state so allow paging
       
  3832 		}
       
  3833 	else
       
  3834 		{
       
  3835 		// Kill current thread...
       
  3836 		NKern::UnlockSystem();
       
  3837 		if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory"))
       
  3838 			{
       
  3839 			// If current thread is in critical section, then the above kill will be deferred
       
  3840 			// and we will continue executing. We will handle this by returning an error
       
  3841 			// which means that the thread will take an exception (which hopefully is XTRAPed!)
       
  3842 			r = KErrAbort;
       
  3843 			}
       
  3844 		// else thread is in 'warning only' state so allow paging
       
  3845 		}
       
  3846 	
       
  3847 	NKern::LockSystem();
       
  3848 	return r;
       
  3849 	}
       
  3850 
       
  3851 
       
  3852 TInt DemandPaging::ResizeLiveList(TUint aMinimumPageCount,TUint aMaximumPageCount)
       
  3853 	{
       
  3854 	if(!aMaximumPageCount)
       
  3855 		{
       
  3856 		aMinimumPageCount = iInitMinimumPageCount;
       
  3857 		aMaximumPageCount = iInitMaximumPageCount;
       
  3858 		}
       
  3859 
       
  3860 	// Min must not be greater than max...
       
  3861 	if(aMinimumPageCount>aMaximumPageCount)
       
  3862 		return KErrArgument;
       
  3863 
       
  3864 	NKern::ThreadEnterCS();
       
  3865 	MmuBase::Wait();
       
  3866 
       
  3867 	NKern::LockSystem();
       
  3868 
       
  3869 	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
       
  3870 	iMinimumPageLimit = ((KMinYoungPages + iNextPagingRequestCount) * (1 + iYoungOldRatio)) / iYoungOldRatio;
       
  3871 	if(iMinimumPageLimit<KAbsoluteMinPageCount)
       
  3872 		iMinimumPageLimit = KAbsoluteMinPageCount;
       
  3873 	if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
       
  3874 		aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
       
  3875 	if(aMaximumPageCount<aMinimumPageCount)
       
  3876 		aMaximumPageCount=aMinimumPageCount;
       
  3877 
       
  3878 	// Increase iMaximumPageCount?
       
  3879 	TInt extra = aMaximumPageCount-iMaximumPageCount;
       
  3880 	if(extra>0)
       
  3881 		iMaximumPageCount += extra;
       
  3882 
       
  3883 	// Reduce iMinimumPageCount?
       
  3884 	TInt spare = iMinimumPageCount-aMinimumPageCount;
       
  3885 	if(spare>0)
       
  3886 		{
       
  3887 		iMinimumPageCount -= spare;
       
  3888 		iNumberOfFreePages += spare;
       
  3889 		}
       
  3890 
       
  3891 	// Increase iMinimumPageCount?
       
  3892 	TInt r=KErrNone;
       
  3893 	while(aMinimumPageCount>iMinimumPageCount)
       
  3894 		{
       
  3895 		if(iNumberOfFreePages==0)	// Need more pages?
       
  3896 			{
       
  3897 			// get a page from the system
       
  3898 			NKern::UnlockSystem();
       
  3899 			SPageInfo* pageInfo = GetPageFromSystem();
       
  3900 			NKern::LockSystem();
       
  3901 			if(!pageInfo)
       
  3902 				{
       
  3903 				r=KErrNoMemory;
       
  3904 				break;
       
  3905 				}
       
  3906 			AddAsFreePage(pageInfo);
       
  3907 			}
       
  3908 		++iMinimumPageCount;
       
  3909 		--iNumberOfFreePages;
       
  3910 		NKern::FlashSystem();
       
  3911 		}
       
  3912 
       
  3913 	// Reduce iMaximumPageCount?
       
  3914 	while(iMaximumPageCount>aMaximumPageCount)
       
  3915 		{
       
  3916 		if (iMinimumPageCount+iNumberOfFreePages==iMaximumPageCount)	// Need to free pages?
       
  3917 			{
       
  3918 			ReturnToSystem(GetOldestPage());
       
  3919 			}
       
  3920 		--iMaximumPageCount;
       
  3921 		NKern::FlashSystem();
       
  3922 		}
       
  3923 
       
  3924 #ifdef BTRACE_KERNEL_MEMORY
       
  3925 	BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,ThePager->iMinimumPageCount << KPageShift);
       
  3926 #endif
       
  3927 
       
  3928 	__NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
       
  3929 
       
  3930 	NKern::UnlockSystem();
       
  3931 
       
  3932 	MmuBase::Signal();
       
  3933 	NKern::ThreadLeaveCS();
       
  3934 
       
  3935 	return r;
       
  3936 	}
       
  3937 
       
  3938 
       
  3939 TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
       
  3940 	{
       
  3941 	DemandPaging* pager = DemandPaging::ThePager;
       
  3942 	switch(aFunction)
       
  3943 		{
       
  3944 	case EVMHalFlushCache:
       
  3945 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)")))
       
  3946 			K::UnlockedPlatformSecurityPanic();
       
  3947 		pager->FlushAll();
       
  3948 		return KErrNone;
       
  3949 
       
  3950 	case EVMHalSetCacheSize:
       
  3951 		{
       
  3952 		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)")))
       
  3953 			K::UnlockedPlatformSecurityPanic();
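       		// a1/a2 are byte sizes; convert to page counts, rounding up to whole pages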
       
  3954 		TUint min = (TUint)a1>>KPageShift;
       
  3955 		if((TUint)a1&KPageMask)
       
  3956 			++min;
       
  3957 		TUint max = (TUint)a2>>KPageShift;
       
  3958 		if((TUint)a2&KPageMask)
       
  3959 			++max;
       
  3960 		return pager->ResizeLiveList(min,max);
       
  3961 		}
       
  3962 
       
  3963 	case EVMHalGetCacheSize:
       
  3964 		{
       
  3965 		SVMCacheInfo info;
       
  3966 		NKern::LockSystem(); // lock system to ensure a consistent set of values is read...
       
  3967 		info.iMinSize = pager->iMinimumPageCount<<KPageShift;
       
  3968 		info.iMaxSize = pager->iMaximumPageCount<<KPageShift;
       
  3969 		info.iCurrentSize = (pager->iMinimumPageCount+pager->iNumberOfFreePages)<<KPageShift;
       
  3970 		info.iMaxFreeSize = pager->iNumberOfFreePages<<KPageShift;
       
  3971 		NKern::UnlockSystem();
       
  3972 		kumemput32(a1,&info,sizeof(info));
       
  3973 		}
       
  3974 		return KErrNone;
       
  3975 
       
  3976 	case EVMHalGetEventInfo:
       
  3977 		{
       
  3978 		SVMEventInfo info;
       
  3979 		NKern::LockSystem(); // lock system to ensure a consistent set of values is read...
       
  3980 		info = pager->iEventInfo;
       
  3981 		NKern::UnlockSystem();
       
  3982 		Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info));
       
  3983 		}
       
  3984 		return KErrNone;
       
  3985 
       
  3986 	case EVMHalResetEventInfo:
       
  3987 		NKern::LockSystem();
       
  3988 		memclr(&pager->iEventInfo, sizeof(pager->iEventInfo));
       
  3989 		NKern::UnlockSystem();
       
  3990 		return KErrNone;
       
  3991 
       
  3992 #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
       
  3993 	case EVMHalGetOriginalRomPages:
       
  3994 		*(TPhysAddr**)a1 = pager->iOriginalRomPages;
       
  3995 		*(TInt*)a2 = pager->iOriginalRomPageCount;
       
  3996 		return KErrNone;
       
  3997 #endif
       
  3998 
       
  3999 	case EVMPageState:
       
  4000 		return pager->PageState((TLinAddr)a1);
       
  4001 
       
  4002 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
       
  4003 	case EVMHalGetConcurrencyInfo:
       
  4004 		{
       
  4005 		NKern::LockSystem();
       
  4006 		SPagingConcurrencyInfo info = { pager->iMaxWaitingCount, pager->iMaxPagingCount };
       
  4007 		NKern::UnlockSystem();
       
  4008 		kumemput32(a1,&info,sizeof(info));
       
  4009 		}
       
  4010 		return KErrNone;
       
  4011 		
       
  4012 	case EVMHalResetConcurrencyInfo:
       
  4013 		NKern::LockSystem();
       
  4014 		pager->iMaxWaitingCount = 0;
       
  4015 		pager->iMaxPagingCount = 0;
       
  4016 		NKern::UnlockSystem();
       
  4017 		return KErrNone;
       
  4018 #endif
       
  4019 
       
  4020 #ifdef __DEMAND_PAGING_BENCHMARKS__
       
  4021 	case EVMHalGetPagingBenchmark:
       
  4022 		{
       
  4023 		TUint index = (TInt) a1;
       
  4024 		if (index >= EMaxPagingBm)
       
  4025 			return KErrNotFound;
       
  4026 		NKern::LockSystem();
       
  4027 		SPagingBenchmarkInfo info = pager->iBenchmarkInfo[index];
       
  4028 		NKern::UnlockSystem();
       
  4029 		kumemput32(a2,&info,sizeof(info));
       
  4030 		}		
       
  4031 		return KErrNone;
       
  4032 		
       
  4033 	case EVMHalResetPagingBenchmark:
       
  4034 		{
       
  4035 		TUint index = (TInt) a1;
       
  4036 		if (index >= EMaxPagingBm)
       
  4037 			return KErrNotFound;
       
  4038 		NKern::LockSystem();
       
  4039 		pager->ResetBenchmarkData((TPagingBenchmark)index);
       
  4040 		NKern::UnlockSystem();
       
  4041 		}
       
  4042 		return KErrNone;
       
  4043 #endif
       
  4044 
       
  4045 	default:
       
  4046 		return KErrNotSupported;
       
  4047 		}
       
  4048 	}
       
  4049 
       
  4050 void DemandPaging::Panic(TFault aFault)
       
  4051 	{
       
  4052 	Kern::Fault("DEMAND-PAGING",aFault);
       
  4053 	}
       
  4054 
       
  4055 
       
  4056 DMutex* DemandPaging::CheckMutexOrder()
       
  4057 	{
       
  4058 #ifdef _DEBUG
       
  4059 	SDblQue& ml = TheCurrentThread->iMutexList;
       
  4060 	if(ml.IsEmpty())
       
  4061 		return NULL;
       
  4062 	DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink);
       
  4063 	if (KMutexOrdPageIn >= mm->iOrder)
       
  4064 		return mm;
       
  4065 #endif
       
  4066 	return NULL;
       
  4067 	}
       
  4068 
       
  4069 
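       	// Reserve one page for future locking: iReservePageCount is incremented and,
       	// if the cache minimum is at its lower limit, iMinimumPageCount grows by one
       	// (taking a page from the system when none are free). Returns EFalse if a
       	// page could not be obtained from the system.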
       
  4070 TBool DemandPaging::ReservePage()
       
  4071 	{
       
  4072 	__ASSERT_SYSTEM_LOCK;
       
  4073 	__ASSERT_CRITICAL;
       
  4074 
       
  4075 	NKern::UnlockSystem();
       
  4076 	MmuBase::Wait();
       
  4077 	NKern::LockSystem();
       
  4078 
       
  4079 	__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
       
  4080 	while (iMinimumPageCount == iMinimumPageLimit + iReservePageCount &&
       
  4081 		   iNumberOfFreePages == 0)
       
  4082 		{
       
  4083 		NKern::UnlockSystem();
       
  4084 		SPageInfo* pageInfo = GetPageFromSystem();
       
  4085 		if(!pageInfo)
       
  4086 			{
       
  4087 			MmuBase::Signal();
       
  4088 			NKern::LockSystem();
       
  4089 			return EFalse;
       
  4090 			}
       
  4091 		NKern::LockSystem();
       
  4092 		AddAsFreePage(pageInfo);
       
  4093 		}
       
  4094 	if (iMinimumPageCount == iMinimumPageLimit + iReservePageCount)
       
  4095 		{	
       
  4096 		++iMinimumPageCount;
       
  4097 		--iNumberOfFreePages;
       
  4098 		if (iMinimumPageCount > iMaximumPageCount)
       
  4099 			iMaximumPageCount = iMinimumPageCount;
       
  4100 		}
       
  4101 	++iReservePageCount;
       
  4102 	__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
       
  4103 	__NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
       
  4104 
       
  4105 	NKern::UnlockSystem();
       
  4106 	MmuBase::Signal();
       
  4107 	NKern::LockSystem();
       
  4108 	return ETrue;
       
  4109 	}
       
  4110 
       
  4111 
       
  4112 TInt DemandPaging::LockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
       
  4113 	{
       
  4114 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion(%08x,%x)",aStart,aSize));
       
  4115 	NKern::ThreadEnterCS();
       
  4116 
       
  4117 	// calculate the number of pages required to lock aSize bytes
       
  4118 	TUint32 mask=KPageMask;
       
  4119 	TUint32 offset=aStart&mask;
       
  4120 	TInt numPages = (aSize+offset+mask)>>KPageShift;
       
  4121 
       
  4122 	// Lock pages...
       
  4123 	TInt r=KErrNone;
       
  4124 	TLinAddr page = aStart;
       
  4125 
       
  4126 	NKern::LockSystem();
       
  4127 	while(--numPages>=0)
       
  4128 		{
       
  4129 		if (!ReservePage())
       
  4130 			break;
       
  4131 		TPhysAddr phys;
       
  4132 		r = LockPage(page,aProcess,phys);
       
  4133 		NKern::FlashSystem();
       
  4134 		if(r!=KErrNone)
       
  4135 			break;
       
  4136 		page += KPageSize;
       
  4137 		}
       
  4138 
       
  4139 	NKern::UnlockSystem();
       
  4140 
       
  4141 	// If error, unlock whatever we managed to lock...
       
  4142 	if(r!=KErrNone)
       
  4143 		{
       
  4144 		while((page-=KPageSize)>=aStart)
       
  4145 			{
       
  4146 			NKern::LockSystem();
       
  4147 			UnlockPage(aStart,aProcess,KPhysAddrInvalid);
       
  4148 			--iReservePageCount;
       
  4149 			NKern::UnlockSystem();
       
  4150 			}
       
  4151 		}
       
  4152 
       
  4153 	NKern::ThreadLeaveCS();
       
  4154 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion returns %d",r));
       
  4155 	return r;
       
  4156 	}
       
  4157 
       
  4158 
       
  4159 TInt DemandPaging::UnlockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
       
  4160 	{
       
  4161 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockRegion(%08x,%x)",aStart,aSize));
       
  4162 	TUint32 mask=KPageMask;
       
  4163 	TUint32 offset=aStart&mask;
       
  4164 	TInt numPages = (aSize+offset+mask)>>KPageShift;
       
  4165 	NKern::LockSystem();
       
  4166 	__NK_ASSERT_DEBUG(iReservePageCount >= (TUint)numPages);
       
  4167 	while(--numPages>=0)
       
  4168 		{
       
  4169 		UnlockPage(aStart,aProcess,KPhysAddrInvalid);
       
  4170 		--iReservePageCount;		
       
  4171 		NKern::FlashSystem();
       
  4172 		aStart += KPageSize;
       
  4173 		}
       
  4174 	NKern::UnlockSystem();
       
  4175 	return KErrNone;
       
  4176 	}
       
  4177 
       
  4178 
       
  4179 void DemandPaging::FlushAll()
       
  4180 	{
       
  4181 	NKern::ThreadEnterCS();
       
  4182 	MmuBase::Wait();
       
  4183 	// look at all RAM pages in the system, and unmap all those used for paging
       
  4184 	const TUint32* piMap = (TUint32*)KPageInfoMap;
       
  4185 	const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5);
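       	// Each bit of the page-info map covers one page's worth of SPageInfo entries;
       	// clear bits are skipped in blocks of KPageInfosPerPage, set bits are scanned
       	// for live paged pages.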
       
  4186 	SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase;
       
  4187 	NKern::LockSystem();
       
  4188 	do
       
  4189 		{
       
  4190 		SPageInfo* piNext = pi+(KPageInfosPerPage<<5);
       
  4191 		for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1)
       
  4192 			{
       
  4193 			if(!(piFlags&1))
       
  4194 				{
       
  4195 				pi += KPageInfosPerPage;
       
  4196 				continue;
       
  4197 				}
       
  4198 			SPageInfo* piEnd = pi+KPageInfosPerPage;
       
  4199 			do
       
  4200 				{
       
  4201 				SPageInfo::TState state = pi->State();
       
  4202 				if(state==SPageInfo::EStatePagedYoung || state==SPageInfo::EStatePagedOld)
       
  4203 					{
       
  4204 					RemovePage(pi);
       
  4205 					SetFree(pi);
       
  4206 					AddAsFreePage(pi);
       
  4207 					NKern::FlashSystem();
       
  4208 					}
       
  4209 				++pi;
       
  4210 				const TUint KFlashCount = 64; // flash every 64 page infos (must be a power-of-2)
       
  4211 				__ASSERT_COMPILE((TUint)KPageInfosPerPage >= KFlashCount);
       
  4212 				if(((TUint)pi&((KFlashCount-1)<<KPageInfoShift))==0)
       
  4213 					NKern::FlashSystem();
       
  4214 				}
       
  4215 			while(pi<piEnd);
       
  4216 			}
       
  4217 		pi = piNext;
       
  4218 		}
       
  4219 	while(piMap<piMapEnd);
       
  4220 	NKern::UnlockSystem();
       
  4221 
       
  4222 	// reduce live page list to a minimum
       
  4223 	while(GetFreePages(1)) {}; 
       
  4224 
       
  4225 	MmuBase::Signal();
       
  4226 	NKern::ThreadLeaveCS();
       
  4227 	}
       
  4228 
       
  4229 
       
  4230 TInt DemandPaging::LockPage(TLinAddr aPage, DProcess *aProcess, TPhysAddr& aPhysAddr)
       
  4231 	{
       
  4232 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: LockPage() %08x",aPage));
       
  4233 	__ASSERT_SYSTEM_LOCK
       
  4234 
       
  4235 	aPhysAddr = KPhysAddrInvalid;
       
  4236 
       
  4237 	TInt r = EnsurePagePresent(aPage,aProcess);
       
  4238 	if (r != KErrNone)
       
  4239 		return KErrArgument; // page doesn't exist
       
  4240 
       
  4241 	// get info about page to be locked...
       
  4242 	TPhysAddr phys = LinearToPhysical(aPage,aProcess);
       
  4243 retry:
       
  4244 	__NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
       
  4245 
       
  4246 	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
       
  4247 	if(!pageInfo)
       
  4248 		return KErrNotFound;
       
  4249 
       
  4250 	// lock it...
       
  4251 	SPageInfo::TType type = pageInfo->Type();
       
  4252 	if(type==SPageInfo::EShadow)
       
  4253 		{
       
  4254 		// get the page which is being shadowed and lock that
       
  4255 		phys = (TPhysAddr)pageInfo->Owner();
       
  4256 		goto retry;
       
  4257 		}
       
  4258 
       
  4259 	switch(pageInfo->State())
       
  4260 		{
       
  4261 	case SPageInfo::EStatePagedLocked:
       
  4262 		// already locked, so just increment lock count...
       
  4263 		++pageInfo->PagedLock();
       
  4264 		break;
       
  4265 
       
  4266 	case SPageInfo::EStatePagedYoung:
       
  4267 		{
       
  4268 		if(type!=SPageInfo::EPagedROM && type !=SPageInfo::EPagedCode)
       
  4269 			{
       
  4270 			// not implemented yet
       
  4271 			__NK_ASSERT_ALWAYS(0);
       
  4272 			}
       
  4273 
       
  4274 		// remove page to be locked from live list...
       
  4275 		RemovePage(pageInfo);
       
  4276 
       
  4277 		// change to locked state...
       
  4278 		pageInfo->SetState(SPageInfo::EStatePagedLocked);
       
  4279 		pageInfo->PagedLock() = 1; // Start with lock count of one
       
  4280 
       
  4281 		// open reference on memory...
       
  4282 		if(type==SPageInfo::EPagedCode)
       
  4283 			{
       
  4284 			DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();
       
  4285 			if(codeSegMemory->Open()!=KErrNone)
       
  4286 				{
       
  4287 				__NK_ASSERT_DEBUG(0);
       
  4288 				}
       
  4289 			}
       
  4290 		}
       
  4291 		
       
  4292 		break;
       
  4293 
       
  4294 	case SPageInfo::EStatePagedOld:
       
  4295 		// can't happen because we forced the page to be accessible earlier
       
  4296 		__NK_ASSERT_ALWAYS(0);
       
  4297 		return KErrCorrupt;
       
  4298 
       
  4299 	default:
       
  4300 		return KErrNotFound;
       
  4301 		}
       
  4302 
       
  4303 	aPhysAddr = phys;
       
  4304 
       
  4305 #ifdef BTRACE_PAGING
       
  4306 	BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,phys,pageInfo->PagedLock());
       
  4307 #endif
       
  4308 	return KErrNone;
       
  4309 	}
       
  4310 
       
  4311 
       
  4312 TInt DemandPaging::UnlockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr aPhysAddr)
       
  4313 	{
       
  4314 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockPage() %08x",aPage));
       
  4315 	__ASSERT_SYSTEM_LOCK;
       
  4316 	__ASSERT_CRITICAL;
       
  4317 
       
  4318 	// Get info about page to be unlocked
       
  4319 	TPhysAddr phys = LinearToPhysical(aPage,aProcess);
       
  4320 	if(phys==KPhysAddrInvalid)
       
  4321 		{
       
  4322 		phys = aPhysAddr;
       
  4323 		if(phys==KPhysAddrInvalid)
       
  4324 			return KErrNotFound;
       
  4325 		}
       
  4326 retry:
       
  4327 	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
       
  4328 	if(!pageInfo)
       
  4329 		return KErrNotFound;
       
  4330 
       
  4331 	SPageInfo::TType type = pageInfo->Type();
       
  4332 	if(type==SPageInfo::EShadow)
       
  4333 		{
       
  4334 		// Get the page which is being shadowed and unlock that
       
  4335 		phys = (TPhysAddr)pageInfo->Owner();
       
  4336 		goto retry;
       
  4337 		}
       
  4338 
       
  4339 	__NK_ASSERT_DEBUG(phys==aPhysAddr || aPhysAddr==KPhysAddrInvalid);
       
  4340 
       
  4341 	// Unlock it...
       
  4342 	switch(pageInfo->State())
       
  4343 		{
       
  4344 	case SPageInfo::EStatePagedLocked:
       
  4345 #ifdef BTRACE_PAGING
       
  4346 		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,phys,pageInfo->PagedLock());
       
  4347 #endif
       
  4348 		if(!(--pageInfo->PagedLock()))
       
  4349 			{
       
  4350 			// get pointer to memory...
       
  4351 			DMemModelCodeSegMemory* codeSegMemory = 0;
       
  4352 			if(type==SPageInfo::EPagedCode)
       
  4353 				codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();
       
  4354 
       
  4355 			// put page back on live list...
       
  4356 			AddAsYoungest(pageInfo);
       
  4357 			BalanceAges();
       
  4358 
       
  4359 			// close reference on memory...
       
  4360 			if(codeSegMemory)
       
  4361 				{
       
  4362 				NKern::UnlockSystem();
       
  4363 				codeSegMemory->Close();
       
  4364 				NKern::LockSystem();
       
  4365 				}
       
  4366 			}
       
  4367 		break;
       
  4368 
       
  4369 	default:
       
  4370 		return KErrNotFound;
       
  4371 		}
       
  4372 
       
  4373 	return KErrNone;
       
  4374 	}
       
  4375 
       
  4376 
       
  4377 
       
  4378 TInt DemandPaging::ReserveAlloc(TInt aSize, DDemandPagingLock& aLock)
       
  4379 	{
       
  4380 	__NK_ASSERT_DEBUG(aLock.iPages == NULL);
       
  4381 	
       
  4382 	// calculate the number of pages required to lock aSize bytes
       
  4383 	TInt numPages = ((aSize-1+KPageMask)>>KPageShift)+1;
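       	// Worst case for an unaligned region: aSize bytes can straddle one more page
       	// than a page-aligned region of the same size, hence the extra +1.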
       
  4384 
       
  4385 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: ReserveAlloc() pages %d",numPages));
       
  4386 	
       
  4387 	NKern::ThreadEnterCS();
       
  4388 
       
  4389 	aLock.iPages = (TPhysAddr*)Kern::Alloc(numPages*sizeof(TPhysAddr));
       
  4390 	if(!aLock.iPages)
       
  4391 		{
       
  4392 		NKern::ThreadLeaveCS();
       
  4393 		return KErrNoMemory;
       
  4394 		}
       
  4395 	
       
  4396 	MmuBase::Wait();
       
  4397 	NKern::LockSystem();
       
  4398 
       
  4399 	// reserve pages, adding more if necessary
       
  4400 	while (aLock.iReservedPageCount < numPages)
       
  4401 		{
       
  4402 		if (!ReservePage())
       
  4403 			break;
       
  4404 		++aLock.iReservedPageCount;
       
  4405 		}
       
  4406 
       
  4407 	NKern::UnlockSystem();
       
  4408 	MmuBase::Signal();
       
  4409 
       
  4410 	TBool enoughPages = aLock.iReservedPageCount == numPages;
       
  4411 	if(!enoughPages)
       
  4412 		ReserveFree(aLock);
       
  4413 
       
  4414 	NKern::ThreadLeaveCS();
       
  4415 	return enoughPages ? KErrNone : KErrNoMemory;
       
  4416 	}
       
  4417 
       
  4418 
       
  4419 
       
  4420 void DemandPaging::ReserveFree(DDemandPagingLock& aLock)
       
  4421 	{
       
  4422 	NKern::ThreadEnterCS();
       
  4423 
       
  4424 	// make sure pages aren't still locked
       
  4425 	ReserveUnlock(aLock);
       
  4426 
       
  4427 	NKern::LockSystem();
       
  4428 	__NK_ASSERT_DEBUG(iReservePageCount >= (TUint)aLock.iReservedPageCount);
       
  4429 	iReservePageCount -= aLock.iReservedPageCount;
       
  4430 	aLock.iReservedPageCount = 0;
       
  4431 	NKern::UnlockSystem();
       
  4432 
       
  4433 	// free page array...
       
  4434 	Kern::Free(aLock.iPages);
       
  4435 	aLock.iPages = 0;
       
  4436 
       
  4437 	NKern::ThreadLeaveCS();
       
  4438 	}
       
  4439 
       
  4440 
       
  4441 
       
  4442 TBool DemandPaging::ReserveLock(DThread* aThread, TLinAddr aStart,TInt aSize, DDemandPagingLock& aLock)
       
  4443 	{
       
  4444 	if(aLock.iLockedPageCount)
       
  4445 		Panic(ELockTwice);
       
  4446 
       
  4447 	// calculate the number of pages that need to be locked...
       
  4448 	TUint32 mask=KPageMask;
       
  4449 	TUint32 offset=aStart&mask;
       
  4450 	TInt numPages = (aSize+offset+mask)>>KPageShift;
       
  4451 	if(numPages>aLock.iReservedPageCount)
       
  4452 		Panic(ELockTooBig);
       
  4453 
       
  4454 	NKern::LockSystem();
       
  4455 
       
  4456 	// lock the pages
       
  4457 	TBool locked = EFalse; // becomes true if any pages were locked
       
  4458 	DProcess* process = aThread->iOwningProcess;
       
  4459 	TLinAddr page=aStart;
       
  4460 	TInt count=numPages;
       
  4461 	TPhysAddr* physPages = aLock.iPages;
       
  4462 	while(--count>=0)
       
  4463 		{
       
  4464 		if(LockPage(page,process,*physPages)==KErrNone)
       
  4465 			locked = ETrue;
       
  4466 		NKern::FlashSystem();
       
  4467 		page += KPageSize;
       
  4468 		++physPages;
       
  4469 		}
       
  4470 
       
  4471 	// if any pages were locked, save the lock info...
       
  4472 	if(locked)
       
  4473 		{
       
  4474 		if(aLock.iLockedPageCount)
       
  4475 			Panic(ELockTwice);
       
  4476 		aLock.iLockedStart = aStart;
       
  4477 		aLock.iLockedPageCount = numPages;
       
  4478 		aLock.iProcess = process;
       
  4479 		aLock.iProcess->Open();
       
  4480 		}
       
  4481 
       
  4482 	NKern::UnlockSystem();
       
  4483 	return locked;
       
  4484 	}
       
  4485 
       
  4486 
       
  4487 
       
  4488 void DemandPaging::ReserveUnlock(DDemandPagingLock& aLock)
       
  4489 	{
       
  4490 	NKern::ThreadEnterCS();
       
  4491 
       
  4492 	DProcess* process = NULL;
       
  4493 	NKern::LockSystem();
       
  4494 	TInt numPages = aLock.iLockedPageCount;
       
  4495 	TLinAddr page = aLock.iLockedStart;
       
  4496 	TPhysAddr* physPages = aLock.iPages;
       
  4497 	while(--numPages>=0)
       
  4498 		{
       
  4499 		UnlockPage(page, aLock.iProcess,*physPages);
       
  4500 		NKern::FlashSystem();
       
  4501 		page += KPageSize;
       
  4502 		++physPages;
       
  4503 		}
       
  4504 	process = aLock.iProcess;
       
  4505 	aLock.iProcess = NULL;
       
  4506 	aLock.iLockedPageCount = 0;
       
  4507 	NKern::UnlockSystem();
       
  4508 	if (process)
       
  4509 		process->Close(NULL);
       
  4510 
       
  4511 	NKern::ThreadLeaveCS();
       
  4512 	}
       
  4513 
       
  4514 /**
       
  4515 Check whether the specified page can be discarded by the RAM cache.
       
  4516 
       
  4517 @param aPageInfo The page info of the page being queried.
       
  4518 @return ETrue when the page can be discarded, EFalse otherwise.
       
  4519 @pre System lock held.
       
  4520 @post System lock held.
       
  4521 */
       
  4522 TBool DemandPaging::IsPageDiscardable(SPageInfo& aPageInfo)
       
  4523 	{
       
  4524 	 // on live list?
       
  4525 	SPageInfo::TState state = aPageInfo.State();
       
  4526 	return (state == SPageInfo::EStatePagedYoung || state == SPageInfo::EStatePagedOld);
       
  4527 	}
       
  4528 
       
  4529 
       
  4530 /**
       
  4531 Discard the specified page.
       
  4532 Should only be called on a page if a previous call to IsPageDiscardable()
       
  4533 returned ETrue and the system lock hasn't been released between the calls.
       
  4534 
       
  4535 @param aPageInfo The page info of the page to be discarded
       
  4536 @param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into.
       
  4537 @param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached 
       
  4538 in preference ordering.  EFalse otherwise.
       
  4539 @return ETrue if the page could be discarded, EFalse otherwise.
       
  4540 
       
  4541 @pre System lock held.
       
  4542 @post System lock held.
       
  4543 */
       
  4544 TBool DemandPaging::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
       
  4545 	{
       
  4546 	__ASSERT_SYSTEM_LOCK;
       
  4547 	// Ensure that we don't reduce the cache beyond its minimum.
       
  4548 	if (iNumberOfFreePages == 0)
       
  4549 		{
       
  4550 		NKern::UnlockSystem();
       
  4551 		SPageInfo* newPage = GetPageFromSystem(aBlockedZoneId, aBlockRest);
       
  4552 		NKern::LockSystem();
       
  4553 		if (newPage == NULL)
       
  4554 			{// couldn't allocate a new page
       
  4555 			return EFalse;
       
  4556 			}
       
  4557 		if (IsPageDiscardable(aPageInfo))
       
  4558 			{// page can still be discarded so use new page 
       
  4559 			// and discard old one
       
  4560 			AddAsFreePage(newPage);
       
  4561 			RemovePage(&aPageInfo);
       
  4562 			SetFree(&aPageInfo);
       
  4563 			ReturnToSystem(&aPageInfo);
       
  4564 			BalanceAges();
       
  4565 			return ETrue;
       
  4566 			}
       
  4567 		else
       
  4568 			{// page no longer discardable so the new page is no longer required
       
  4569 			ReturnToSystem(newPage);
       
  4570 			return EFalse;
       
  4571 			}
       
  4572 		}
       
  4573 
       
  4574 	// Discard the page
       
  4575 	RemovePage(&aPageInfo);
       
  4576 	SetFree(&aPageInfo);
       
  4577 	ReturnToSystem(&aPageInfo);
       
  4578 	BalanceAges();
       
  4579 	
       
  4580 	return ETrue;
       
  4581 	}
       
  4582 
       
  4583 
       
  4584 /**
       
  4585 First stage in discarding a list of pages.
       
  4586 
       
  4587 Must ensure that the pages will still be discardable even if system lock is released.
       
  4588 To be used in conjunction with RamCacheBase::DoDiscardPages1().
       
  4589 
       
  4590 @param aPageList A NULL terminated list of the pages to be discarded
       
  4591 @return KErrNone on success.
       
  4592 
       
  4593 @pre System lock held
       
  4594 @post System lock held
       
  4595 */
       
  4596 TInt DemandPaging::DoDiscardPages0(SPageInfo** aPageList)
       
  4597 	{
       
  4598 	__ASSERT_SYSTEM_LOCK;
       
  4599 
       
  4600 	SPageInfo* pageInfo;
       
  4601 	while((pageInfo = *aPageList++) != 0)
       
  4602 		{
       
  4603 		RemovePage(pageInfo);
       
  4604 		}
       
  4605 	return KErrNone;
       
  4606 	}
       
  4607 
       
  4608 
       
  4609 /**
       
  4610 Final stage in discarding a list of pages.
       
  4611 Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().
       
  4612 
       
  4613 @param aPageList A NULL terminated list of the pages to be discarded
       
  4614 @return KErrNone on success.
       
  4615 
       
  4616 @pre System lock held
       
  4617 @post System lock held
       
  4618 */
       
  4619 TInt DemandPaging::DoDiscardPages1(SPageInfo** aPageList)
       
  4620 	{
       
  4621 	__ASSERT_SYSTEM_LOCK;
       
  4622 
       
  4623 	SPageInfo* pageInfo;
       
  4624 	while((pageInfo = *aPageList++)!=0)
       
  4625 		{
       
  4626 		SetFree(pageInfo);
       
  4627 		ReturnToSystem(pageInfo);
       
  4628 		BalanceAges();
       
  4629 		}
       
  4630 	return KErrNone;
       
  4631 	}
       
  4632 
       
  4633 
       
  4634 TBool DemandPaging::MayBePaged(TLinAddr aStartAddr, TUint aLength)
       
  4635 	{
       
  4636 	TLinAddr endAddr = aStartAddr + aLength;
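       	// Unsigned arithmetic turns each pair of comparisons into a single range check:
       	// TUint(x - base) < size  is equivalent to  base <= x && x < base + size.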
       
  4637 	TBool rangeTouchesPagedRom =
       
  4638 		TUint(aStartAddr - iRomPagedLinearBase) < iRomSize  ||
       
  4639 		TUint(endAddr - iRomPagedLinearBase) < iRomSize;
       
  4640 	TBool rangeTouchesCodeArea =
       
  4641 		TUint(aStartAddr - iCodeLinearBase) < iCodeSize  ||
       
  4642 		TUint(endAddr - iCodeLinearBase) < iCodeSize;
       
  4643 	return rangeTouchesPagedRom || rangeTouchesCodeArea;
       
  4644 	}
       
  4645 
       
  4646 
       
  4647 #ifdef __DEMAND_PAGING_BENCHMARKS__
       
  4648 
       
  4649 void DemandPaging::ResetBenchmarkData(TPagingBenchmark aBm)
       
  4650 	{
       
  4651 	SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
       
  4652 	info.iCount = 0;
       
  4653 	info.iTotalTime = 0;
       
  4654 	info.iMaxTime = 0;
       
  4655 	info.iMinTime = KMaxTInt;
       
  4656 	}
       
  4657 
       
  4658 void DemandPaging::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
       
  4659 	{
       
  4660 	SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
       
  4661 	++info.iCount;
       
  4662 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
       
  4663 	TInt64 elapsed = aEndTime - aStartTime;
       
  4664 #else
       
  4665 	TInt64 elapsed = aStartTime - aEndTime;
       
  4666 #endif
       
  4667 	info.iTotalTime += elapsed;
       
  4668 	if (elapsed > info.iMaxTime)
       
  4669 		info.iMaxTime = elapsed;
       
  4670 	if (elapsed < info.iMinTime)
       
  4671 		info.iMinTime = elapsed;
       
  4672 	}
       
  4673 	
       
  4674 #endif
       
  4675 
       
  4676 
       
  4677 //
       
  4678 // DDemandPagingLock
       
  4679 //
       
  4680 
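       // Illustrative use by a driver (sketch only, not part of this file): construct a
       // DDemandPagingLock, call Alloc() once to reserve enough pages for the largest
       // expected request, bracket each access to pageable memory with Lock() and
       // DoUnlock(), and finally call Free() to release the reservation.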
       
  4681 EXPORT_C DDemandPagingLock::DDemandPagingLock()
       
  4682 	: iThePager(DemandPaging::ThePager), iReservedPageCount(0), iLockedPageCount(0), iPages(0)
       
  4683 	{
       
  4684 	}
       
  4685 
       
  4686 
       
  4687 EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize)
       
  4688 	{	
       
  4689 	if (iThePager)
       
  4690 		return iThePager->ReserveAlloc(aSize,*this);
       
  4691 	else
       
  4692 		return KErrNone;
       
  4693 	}
       
  4694 
       
  4695 
       
  4696 EXPORT_C void DDemandPagingLock::DoUnlock()
       
  4697 	{
       
  4698 	if (iThePager)
       
  4699 		iThePager->ReserveUnlock(*this);
       
  4700 	}
       
  4701 
       
  4702 
       
  4703 EXPORT_C void DDemandPagingLock::Free()
       
  4704 	{
       
  4705 	if (iThePager)
       
  4706 		iThePager->ReserveFree(*this);
       
  4707 	}
       
  4708 
       
  4709 
       
  4710 EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
       
  4711 	{
       
  4712 	if (DemandPaging::ThePager)
       
  4713 		return DemandPaging::ThePager->InstallPagingDevice(aDevice);
       
  4714 	else
       
  4715 		return KErrNotSupported;
       
  4716 	}
       
  4717 
       
  4718 
       
  4719 #else  // !__DEMAND_PAGING__
       
  4720 
       
  4721 EXPORT_C DDemandPagingLock::DDemandPagingLock()
       
  4722 	: iLockedPageCount(0)
       
  4723 	{
       
  4724 	}
       
  4725 
       
  4726 EXPORT_C TInt DDemandPagingLock::Alloc(TInt /*aSize*/)
       
  4727 	{
       
  4728 	return KErrNone;
       
  4729 	}
       
  4730 
       
  4731 EXPORT_C TBool DDemandPagingLock::Lock(DThread* /*aThread*/, TLinAddr /*aStart*/, TInt /*aSize*/)
       
  4732 	{
       
  4733 	return EFalse;
       
  4734 	}
       
  4735 
       
  4736 EXPORT_C void DDemandPagingLock::DoUnlock()
       
  4737 	{
       
  4738 	}
       
  4739 
       
  4740 EXPORT_C void DDemandPagingLock::Free()
       
  4741 	{
       
  4742 	}
       
  4743 
       
  4744 EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
       
  4745 	{
       
  4746 	return KErrNotSupported;
       
  4747 	}
       
  4748 
       
  4749 #endif // __DEMAND_PAGING__
       
  4750 
       
  4751 
       
  4752 DMmuCodeSegMemory::DMmuCodeSegMemory(DEpocCodeSeg* aCodeSeg)
       
  4753 	: DEpocCodeSegMemory(aCodeSeg), iCodeAllocBase(KMinTInt)
       
  4754 	{
       
  4755 	}
       
  4756 
       
  4757 //#define __DUMP_BLOCKMAP_INFO
       
  4758 DMmuCodeSegMemory::~DMmuCodeSegMemory()
       
  4759 	{
       
  4760 #ifdef __DEMAND_PAGING__
       
  4761 	Kern::Free(iCodeRelocTable);
       
  4762 	Kern::Free(iCodePageOffsets);
       
  4763 	Kern::Free(iDataSectionMemory);
       
  4764 #endif
       
  4765 	}
       
  4766 
       
  4767 #ifdef __DEMAND_PAGING__
       
  4768 
       
  4769 /**
       
  4770 Read and process the block map and related data.
       
  4771 */
       
  4772 TInt DMmuCodeSegMemory::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
       
  4773 	{
       
  4774 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading block map for %C", iCodeSeg));
       
  4775 
       
  4776 	if (aInfo.iCodeBlockMapEntriesSize <= 0)
       
  4777 		return KErrArgument;  // no block map provided
       
  4778 	
       
  4779 	// Get compression data
       
  4780 	switch (aInfo.iCompressionType)
       
  4781 		{
       
  4782 		case KFormatNotCompressed:
       
  4783 			iCompressionType = SRomPageInfo::ENoCompression;
       
  4784 			break;
       
  4785 
       
  4786 		case KUidCompressionBytePair:
       
  4787 			{
       
  4788 			iCompressionType = SRomPageInfo::EBytePair;
       
  4789 			if (!aInfo.iCodePageOffsets)
       
  4790 				return KErrArgument;
       
  4791 			TInt size = sizeof(TInt32) * (iPageCount + 1);
       
  4792 			iCodePageOffsets = (TInt32*)Kern::Alloc(size);
       
  4793 			if (!iCodePageOffsets)
       
  4794 				return KErrNoMemory;
       
  4795 			kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);
       
  4796 
       
  4797 #ifdef __DUMP_BLOCKMAP_INFO
       
  4798 			Kern::Printf("CodePageOffsets:");
       
  4799 			for (TInt i = 0 ; i < iPageCount + 1 ; ++i)
       
  4800 				Kern::Printf("  %08x", iCodePageOffsets[i]);
       
  4801 #endif
       
  4802 
       
  4803 			TInt last = 0;
       
  4804 			for (TInt j = 0 ; j < iPageCount + 1 ; ++j)
       
  4805 				{
       
  4806 				if (iCodePageOffsets[j] < last ||
       
  4807 					iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
       
  4808 					{
       
  4809 					__NK_ASSERT_DEBUG(0);
       
  4810 					return KErrCorrupt;
       
  4811 					}
       
  4812 				last = iCodePageOffsets[j];
       
  4813 				}
       
  4814 			}
       
  4815 			break;
       
  4816 
       
  4817 		default:
       
  4818 			return KErrNotSupported;
       
  4819 		}		
       
  4820 
       
  4821 	// Copy block map data itself...
       
  4822 
       
  4823 #ifdef __DUMP_BLOCKMAP_INFO
       
  4824 	Kern::Printf("Original block map");
       
  4825 	Kern::Printf("  block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
       
  4826 	Kern::Printf("  block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
       
  4827 	Kern::Printf("  start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
       
  4828 	Kern::Printf("  local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
       
  4829 	Kern::Printf("  entry size: %d", aInfo.iCodeBlockMapEntriesSize);
       
  4830 #endif
       
  4831 
       
  4832 	// Find relevant paging device
       
  4833 	iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
       
  4834 	if (TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
       
  4835 		{
       
  4836 		__KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
       
  4837 		return KErrArgument;
       
  4838 		}
       
  4839 	DemandPaging* pager = DemandPaging::ThePager;
       
  4840 	
       
  4841 	if (!pager->CodePagingDevice(iCodeLocalDrive).iInstalled)
       
  4842 		{
       
  4843 		__KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
       
  4844 		return KErrNotSupported;
       
  4845 		}
       
  4846 	DPagingDevice* device = pager->CodePagingDevice(iCodeLocalDrive).iDevice;
       
  4847 
       
  4848 	// Set code start offset
       
  4849 	iCodeStartInFile = aInfo.iCodeStartInFile;
       
  4850 	if (iCodeStartInFile < 0)
       
  4851 		{
       
  4852 		__KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
       
  4853 		return KErrArgument;
       
  4854 		}
       
  4855 	
       
  4856 	// Allocate buffer for block map and copy from user-side
       
  4857 	TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
       
  4858 	if (!buffer)
       
  4859 		return KErrNoMemory;
       
  4860 	kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);
       
  4861 	
       
  4862 #ifdef __DUMP_BLOCKMAP_INFO
       
  4863 	Kern::Printf("  entries:");
       
  4864 	for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
       
  4865 		Kern::Printf("    %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
       
  4866 #endif
       
  4867 
       
  4868 	// Initialise block map
       
  4869 	TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
       
  4870 								  buffer,
       
  4871 								  aInfo.iCodeBlockMapEntriesSize,
       
  4872 								  device->iReadUnitShift,
       
  4873 								  iCodeStartInFile + aInfo.iCodeLengthInFile);
       
  4874 	if (r != KErrNone)
       
  4875 		{
       
  4876 		Kern::Free(buffer);
       
  4877 		return r;
       
  4878 		}
       
  4879 
       
  4880 #if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
       
  4881 	iBlockMap.Dump();
       
  4882 #endif
       
  4883 	
       
  4884 	return KErrNone;
       
  4885 	}
       
  4886 
       
  4887 /**
       
  4888 Read code relocation table and import fixup table from user side.
       
  4889 */
       
  4890 TInt DMmuCodeSegMemory::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
       
  4891 	{
       
  4892 	__KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading fixup tables for %C", iCodeSeg));
       
  4893 	
       
  4894 	iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
       
  4895 	iImportFixupTableSize = aInfo.iImportFixupTableSize;
       
  4896 	iCodeDelta = aInfo.iCodeDelta;
       
  4897 	iDataDelta = aInfo.iDataDelta;
       
  4898 	
       
  4899 	// round sizes to four-byte boundaries...
       
  4900 	TInt relocSize = (iCodeRelocTableSize + 3) & ~3;
       
  4901 	TInt fixupSize = (iImportFixupTableSize + 3) & ~3;
       
  4902 
       
  4903 	// copy relocs and fixups...
       
  4904 	iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
       
  4905 	if (!iCodeRelocTable)
       
  4906 		return KErrNoMemory;
       
  4907 	iImportFixupTable = iCodeRelocTable + relocSize;
       
  4908 	kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
       
  4909 	kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);
       
  4910 	
       
  4911 	return KErrNone;
       
  4912 	}
       
  4913 
       
  4914 #endif
       
  4915 
       
  4916 
       
  4917 TInt DMmuCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
       
  4918 	{
       
  4919 	TInt r = KErrNone;	
       
  4920 	if (!aInfo.iUseCodePaging)
       
  4921 		iPageCount=(iRamInfo.iCodeSize+iRamInfo.iDataSize+KPageMask)>>KPageShift;
       
  4922 	else
       
  4923 		{
       
  4924 #ifdef __DEMAND_PAGING__
       
  4925 		iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
       
  4926 		if (!iDataSectionMemory)
       
  4927 			return KErrNoMemory;
       
  4928 
       
  4929 		iPageCount=(iRamInfo.iCodeSize+KPageMask)>>KPageShift;
       
  4930 		iDataPageCount=(iRamInfo.iDataSize+KPageMask)>>KPageShift;
       
  4931 
       
  4932 		r = ReadBlockMap(aInfo);
       
  4933 		if (r != KErrNone)
       
  4934 			return r;
       
  4935 
       
  4936 		iIsDemandPaged = ETrue;
       
  4937 		iCodeSeg->iAttr |= ECodeSegAttCodePaged;
       
  4938 #endif
       
  4939 		}
       
  4940 
       
  4941 	iCodeSeg->iSize = (iPageCount+iDataPageCount)<<KPageShift;
       
  4942 	return r;		
       
  4943 	}
       
  4944 
       
  4945 
       
  4946 TInt DMmuCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
       
  4947 	{
       
  4948 #ifdef __DEMAND_PAGING__
       
  4949 	if(iIsDemandPaged)
       
  4950 		{
       
  4951 		TInt r = ReadFixupTables(aInfo);
       
  4952 		if (r != KErrNone)
       
  4953 			return r;
       
  4954 		}
       
  4955 	TAny* dataSection = iDataSectionMemory;
       
  4956 	if(dataSection)
       
  4957 		{
       
  4958 		UNLOCK_USER_MEMORY();
       
  4959 		memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
       
  4960 		LOCK_USER_MEMORY();
       
  4961 		iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
       
  4962 		}
       
  4963 #endif
       
  4964 	return KErrNone;
       
  4965 	}
       
  4966 
       
  4967 
       
  4968 void DMmuCodeSegMemory::ApplyCodeFixups(TUint32* aBuffer, TLinAddr aDestAddress)
       
  4969 	{
       
  4970 	__NK_ASSERT_DEBUG(iRamInfo.iCodeRunAddr==iRamInfo.iCodeLoadAddr); // code doesn't work if this isn't true
       
  4971 
       
  4972 	START_PAGING_BENCHMARK;
       
  4973 	
       
  4974 	TUint offset = aDestAddress - iRamInfo.iCodeRunAddr;
       
  4975 	__ASSERT_ALWAYS(offset < (TUint)(iRamInfo.iCodeSize + iRamInfo.iDataSize), K::Fault(K::ECodeSegBadFixupAddress));
       
  4976 
       
  4977 	// Index tables are only valid for pages containing code
       
  4978 	if (offset >= (TUint)iRamInfo.iCodeSize)
       
  4979 		return;
       
  4980 
       
  4981 	UNLOCK_USER_MEMORY();
       
  4982 
       
  4983 	TInt page = offset >> KPageShift;
       
  4984 
       
  4985 	// Relocate code
       
  4986 	
       
  4987 	if (iCodeRelocTableSize > 0)
       
  4988 		{
       
  4989 		TUint32* codeRelocTable32 = (TUint32*)iCodeRelocTable;
       
  4990 		TUint startOffset = codeRelocTable32[page];
       
  4991 		TUint endOffset = codeRelocTable32[page + 1];
       
  4992 		
       
  4993 		__KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
       
  4994 		__ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iCodeRelocTableSize,
       
  4995 						K::Fault(K::ECodeSegBadFixupTables));
       
  4996 		
       
  4997 		TUint8* codeRelocTable8 = (TUint8*)codeRelocTable32;
       
  4998 		const TUint16* ptr = (const TUint16*)(codeRelocTable8 + startOffset);
       
  4999 		const TUint16* end = (const TUint16*)(codeRelocTable8 + endOffset);
       
  5000 
       
  5001 		const TUint32 codeDelta = iCodeDelta;
       
  5002 		const TUint32 dataDelta = iDataDelta;
       
  5003 
       
  5004 		while (ptr < end)
       
  5005 			{
       
  5006 			TUint16 entry = *ptr++;
       
  5007 
       
  5008 			// address of word to fix up is sum of page start and 12-bit offset
       
  5009 			TUint32* addr = (TUint32*)((TUint8*)aBuffer + (entry & 0x0fff));
       
  5010 			
       
  5011 			TUint32 word = *addr;
       
  5012 #ifdef _DEBUG
       
  5013 			TInt type = entry & 0xf000;
       
  5014 			__NK_ASSERT_DEBUG(type == KTextRelocType || type == KDataRelocType);
       
  5015 #endif
       
  5016 			if (entry < KDataRelocType /* => type == KTextRelocType */)
       
  5017 				word += codeDelta;
       
  5018 			else
       
  5019 				word += dataDelta;
       
  5020 			*addr = word;
       
  5021 			}
       
  5022 		}
       
  5023 		
       
  5024 	// Fixup imports
       
  5025 			
       
  5026 	if (iImportFixupTableSize > 0)
       
  5027 		{
       
  5028 		TUint32* importFixupTable32 = (TUint32*)iImportFixupTable;
       
  5029 		TUint startOffset = importFixupTable32[page];
       
  5030 		TUint endOffset = importFixupTable32[page + 1];
       
  5031 		
       
  5032 		__KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
       
  5033 		__ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iImportFixupTableSize,
       
  5034 						K::Fault(K::ECodeSegBadFixupTables));
       
  5035 		
       
  5036 		TUint8* importFixupTable8 = (TUint8*)importFixupTable32;
       
  5037 		const TUint16* ptr = (const TUint16*)(importFixupTable8 + startOffset);
       
  5038 		const TUint16* end = (const TUint16*)(importFixupTable8 + endOffset);
       
  5039 
       
  5040 		while (ptr < end)
       
  5041 			{
       
  5042 			TUint16 offset = *ptr++;
       
  5043 		
       
  5044 			// get word to write into that address
       
  5045 			// (don't read as a single TUint32 because it may not be word-aligned)
       
  5046 			TUint32 wordLow = *ptr++;
       
  5047 			TUint32 wordHigh = *ptr++;
       
  5048 			TUint32 word = (wordHigh << 16) | wordLow;
       
  5049 
       
  5050 			__KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
       
  5051 			*(TUint32*)((TLinAddr)aBuffer+offset) = word;
       
  5052 			}
       
  5053 		}
       
  5054 	
       
  5055 	LOCK_USER_MEMORY();
       
  5056 
       
  5057 	END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);
       
  5058 	}
       
  5059 
       
  5060 
       
  5061 TInt DMmuCodeSegMemory::ApplyCodeFixupsOnLoad(TUint32* aBuffer, TLinAddr aDestAddress)
       
  5062 	{
       
  5063 #ifdef __DEMAND_PAGING__
       
  5064 	TInt r=DemandPaging::ThePager->LockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
       
  5065 	if(r!=KErrNone)
       
  5066 		return r;
       
  5067 #endif
       
  5068 	ApplyCodeFixups(aBuffer,aDestAddress);
       
  5069 	UNLOCK_USER_MEMORY();
       
  5070 	CacheMaintenance::CodeChanged((TLinAddr)aBuffer, KPageSize);
       
  5071 	LOCK_USER_MEMORY();
       
  5072 #ifdef __DEMAND_PAGING__
       
  5073 	DemandPaging::ThePager->UnlockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
       
  5074 #endif
       
  5075 	return KErrNone;
       
  5076 	}
       
  5077 
       
  5078 
       
  5079 #ifdef __DEMAND_PAGING__
       
  5080 
       
  5081 TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
       
  5082 	{
       
  5083 	aPinObject = (TVirtualPinObject*) new DDemandPagingLock;
       
  5084 	return aPinObject != NULL ? KErrNone : KErrNoMemory;
       
  5085 	}
       
  5086 
       
  5087 TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
       
  5088 	{
       
  5089 	if (!DemandPaging::ThePager)
       
  5090 		return KErrNone;
       
  5091 	
       
  5092 	if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
       
  5093 		return KErrNone;
       
  5094 
       
  5095 	DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
       
  5096 	TInt r = lock->Alloc(aSize);
       
  5097 	if (r != KErrNone)
       
  5098 		return r;
       
  5099 	lock->Lock(aThread, aStart, aSize);
       
  5100 	return KErrNone;
       
  5101 	}
       
  5102 
       
  5103 TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
       
  5104 	{
       
  5105 	aPinObject = 0;
       
  5106 
       
  5107 	if (!DemandPaging::ThePager)
       
  5108 		return KErrNone;
       
  5109 	if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
       
  5110 		return KErrNone;
       
  5111 
       
  5112 	TInt r = CreateVirtualPinObject(aPinObject);
       
  5113 	if (r != KErrNone)
       
  5114 		return r;
       
  5115 
       
  5116 	DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
       
  5117 	r = lock->Alloc(aSize);
       
  5118 	if (r != KErrNone)
       
  5119 		return r;
       
  5120 	lock->Lock(TheCurrentThread, aStart, aSize);
       
  5121 	return KErrNone;
       
  5122 	}
       
  5123 
       
  5124 void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
       
  5125 	{
       
  5126 	DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
       
  5127 	if (lock)
       
  5128 		lock->Free();
       
  5129 	}
       
  5130 	
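// DestroyVirtualPinObject() claims the pointer atomically (so a racing destroy
// sees NULL) and defers the actual deletion of the lock via AsyncDelete().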
       
  5131 void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
       
  5132 	{
       
  5133 	DDemandPagingLock* lock = (DDemandPagingLock*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
       
  5134 	if (lock)
       
  5135 		lock->AsyncDelete();
       
  5136 	}
       
  5137 
       
  5138 #else
       
  5139 
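// Virtual pinning, non-paged configuration: nothing can ever be paged out, so a
// TVirtualPinObject is just a placeholder and pin/unpin are no-ops.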
       
  5140 class TVirtualPinObject
       
  5141 	{	
       
  5142 	};
       
  5143 
       
  5144 TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
       
  5145 	{
       
  5146 	aPinObject = new TVirtualPinObject;
       
  5147 	return aPinObject != NULL ? KErrNone : KErrNoMemory;
       
  5148 	}
       
  5149 
       
  5150 TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr, TUint, DThread*)
       
  5151 	{
       
  5152 	__ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
       
  5153 	(void)aPinObject;
       
  5154 	return KErrNone;
       
  5155 	}
       
  5156 
       
  5157 TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr, TUint)
       
  5158 	{
       
  5159 	aPinObject = 0;
       
  5160 	return KErrNone;
       
  5161 	}
       
  5162 
       
  5163 void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
       
  5164 	{
       
  5165 	__ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
       
  5166 	(void)aPinObject;
       
  5167 	}
       
  5168 
       
  5169 void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
       
  5170 	{
       
  5171 	TVirtualPinObject* object = (TVirtualPinObject*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
       
  5172 	if (object)
       
  5173 		Kern::AsyncFree(object);
       
  5174 	}
       
  5175 
       
  5176 #endif
       
  5177 
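// Physical pinning is not supported on this memory model: creating a physical pin
// object fails with KErrNotSupported, and the remaining operations fault if they
// are ever reached.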
       
  5178 TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
       
  5179 	{
       
  5180 	return KErrNotSupported;
       
  5181 	}
       
  5182 
       
  5183 TInt M::PinPhysicalMemory(TPhysicalPinObject*, TLinAddr, TUint, TBool, TUint32&, TUint32*, TUint32&, TUint&, DThread*)
       
  5184 	{
       
  5185 	K::Fault(K::EPhysicalPinObjectBad);
       
  5186 	return KErrNone;
       
  5187 	}
       
  5188 
       
  5189 void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
       
  5190 	{
       
  5191 	K::Fault(K::EPhysicalPinObjectBad);
       
  5192 	}
       
  5193 
       
  5194 void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
       
  5195 	{
       
  5196 	K::Fault(K::EPhysicalPinObjectBad);
       
  5197 	}
       
  5198 
       
  5199 // Misc DPagingDevice methods
       
  5200 
       
  5201 EXPORT_C void DPagingDevice::NotifyIdle()
       
  5202 	{
       
  5203 	// Not used on this memory model
       
  5204 	}
       
  5205 
       
  5206 EXPORT_C void DPagingDevice::NotifyBusy()
       
  5207 	{
       
  5208 	// Not used on this memory model
       
  5209 	}
       
  5210 
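// Cache maintenance on physical memory regions for DMA is not supported on this
// memory model; these operations all return KErrNotSupported.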
       
  5211 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* , TUint , TUint , TUint , TUint32 )
       
  5212 	{
       
  5213 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
       
  5214 	return KErrNotSupported;
       
  5215 	}
       
  5216 
       
  5217 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 )
       
  5218 	{
       
  5219 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
       
  5220 	return KErrNotSupported;
       
  5221 	}
       
  5222 EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 )
       
  5223 	{
       
  5224 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
       
  5225 	return KErrNotSupported;
       
  5226 	}
       
  5227 
       
  5228 //
       
  5229 //	Page moving methods
       
  5230 //
       
  5231 
       
  5232 /*
       
  5233  * Move a page from aOld to aNew safely, updating any references to the page
       
  5234  * stored elsewhere (such as page table entries). The destination page must
       
  5235  * already be allocated. If the move is successful, the source page will be
       
  5236  * freed and returned to the allocator.
       
  5237  *
       
  5238  * @pre RAM alloc mutex must be held.
       
  5239  * @pre Calling thread must be in a critical section.
       
  5240  * @pre Interrupts must be enabled.
       
  5241  * @pre Kernel must be unlocked.
       
  5242  * @pre No fast mutex can be held.
       
  5243  * @pre Call in a thread context.
       
  5244  */
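/*
 * Illustrative caller sketch only (not code from this file): oldAddr is a
 * hypothetical physical address, and MmuBase::TheMmu is assumed to be the
 * active MMU object as used elsewhere in this file.
 *
 *   NKern::ThreadEnterCS();
 *   MmuBase::Wait();                     // take the RAM allocator mutex
 *   TPhysAddr newAddr = KPhysAddrInvalid;
 *   TInt r = MmuBase::TheMmu->MovePage(oldAddr, newAddr, KRamZoneInvalidId, EFalse);
 *   MmuBase::Signal();
 *   NKern::ThreadLeaveCS();
 */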
       
  5245 TInt MmuBase::MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
       
  5246 	{
       
  5247 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Defrag::DoMovePage");
       
  5248 	__ASSERT_WITH_MESSAGE_MUTEX(MmuBase::RamAllocatorMutex, "Ram allocator mutex must be held", "Defrag::DoMovePage");
       
  5249 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() old=%08x",aOld));
       
  5250 	TInt r = KErrNotSupported;
       
  5251 #if defined(__CPU_X86) && defined(__MEMMODEL_MULTIPLE__)
       
  5252 	return r;
       
  5253 #endif
       
  5254 	aNew = KPhysAddrInvalid;
       
  5255 	NKern::LockSystem();
       
  5256 	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aOld);
       
  5257 	if (!pi)
       
  5258 		{
       
  5259 		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page has no PageInfo"));
       
  5260 		r = KErrArgument;
       
  5261 		goto fail;
       
  5262 		}
       
  5263 	if (pi->LockCount())
       
  5264 		{
       
  5265 		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page is locked"));
       
  5266 		goto fail;
       
  5267 		}
       
  5268 	
       
  5269 	switch(pi->Type())
       
  5270 		{
       
  5271 	case SPageInfo::EUnused:
       
  5272 		// Nothing to do - we allow this, though, in case the caller wasn't
       
  5273 		// actually checking the free bitmap.
       
  5274 		r = KErrNotFound;
       
  5275 		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage(): page unused"));
       
  5276 		break;
       
  5277 
       
  5278 	case SPageInfo::EChunk:
       
  5279 		{
       
  5280 		// It's a chunk - we need to investigate what it's used for.
       
  5281 		DChunk* chunk = (DChunk*)pi->Owner();
       
  5282 		TInt offset = pi->Offset()<<KPageShift;
       
  5283 
       
  5284 		switch(chunk->iChunkType)
       
  5285 			{
       
  5286 		case EKernelData:
       
  5287 		case EKernelMessage:
       
  5288 			// The kernel data/bss/heap chunk pages are not moved as DMA may be accessing them.
       
  5289 			__KTRACE_OPT(KMMU, Kern::Printf("MmuBase::MovePage() fails: kernel data"));
       
  5290 			goto fail;
       
  5291 
       
  5292 		case EKernelStack:
       
  5293 			// The kernel thread stack chunk.
       
  5294 			r = MoveKernelStackPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
       
  5295 			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: k stack r%d",r));
       
  5296 			__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
       
  5297  			goto released;
       
  5298 
       
  5299 		case EKernelCode:
       
  5300 		case EDll:
       
  5301 			// The kernel code chunk, or a global user code chunk.
       
  5302 			r = MoveCodeChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
       
  5303 			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: code chk r%d",r));
       
  5304 			__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
       
  5305 			goto released;
       
  5306 
       
  5307 		case ERamDrive:
       
  5308 		case EUserData:
       
  5309 		case EDllData:
       
  5310 		case EUserSelfModCode:
       
  5311 			// A data chunk of some description.
       
  5312 			r = MoveDataChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
       
  5313 			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: data chk r%d",r));
       
  5314 			__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
       
  5315 			goto released;
       
  5316 
       
  5317 		case ESharedKernelSingle:
       
  5318 		case ESharedKernelMultiple:
       
  5319 		case ESharedIo:
       
  5320 		case ESharedKernelMirror:
       
  5321 			// These chunk types cannot be moved
       
  5322 			r = KErrNotSupported;
       
  5323 			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: shared r%d",r));
       
  5324 			break;
       
  5325 
       
  5326 		case EUserCode:
       
  5327 		default:
       
  5328 			// Unknown page type, or EUserCode.
       
  5329 			// EUserCode is not used in the moving model, and on the multiple model
       
  5330 			// it never owns any pages, so it shouldn't be found via SPageInfo.
       
  5331 			__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: unknown chunk type %d",chunk->iChunkType));
       
  5332 			Panic(EDefragUnknownChunkType);
       
  5333 			}
       
  5334 		}
       
  5335 		break;
       
  5336 
       
  5337 	case SPageInfo::ECodeSegMemory:
       
  5338 		// It's a code segment memory section (multiple model only)
       
  5339 		r = MoveCodeSegMemoryPage((DMemModelCodeSegMemory*)pi->Owner(), pi->Offset()<<KPageShift, aOld, aNew, aBlockZoneId, aBlockRest);
       
  5340 		__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: codeseg r%d",r));
       
  5341 		__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
       
  5342 		goto released;
       
  5343 
       
  5344 	case SPageInfo::EPagedROM:
       
  5345 	case SPageInfo::EPagedCode:
       
  5346 	case SPageInfo::EPagedData:
       
  5347 	case SPageInfo::EPagedCache:
       
  5348 	case SPageInfo::EPagedFree:
       
  5349 		{// DP or RamCache page, so attempt to discard it. This is for testing purposes only;
       
  5350 		//  in normal use ClearDiscardableFromZone will already have removed RAM cache pages.
       
  5351 		r = KErrInUse;
       
  5352 		MmuBase& mmu = *MmuBase::TheMmu;
       
  5353 		RamCacheBase& ramCache = *(mmu.iRamCache);
       
  5354 		if (ramCache.IsPageDiscardable(*pi))
       
  5355 			{
       
  5356 			if (ramCache.DoDiscardPage(*pi, KRamZoneInvalidId, EFalse))
       
  5357 				{// Successfully discarded the page.
       
  5358 				r = KErrNone;
       
  5359 				}
       
  5360 			}
       
  5361 		__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: paged r%d",r));
       
  5362 		goto fail; // Goto fail to release the system lock.	
       
  5363 		}
       
  5364 
       
  5365 		
       
  5366 	case SPageInfo::EPageTable:
       
  5367 	case SPageInfo::EPageDir:
       
  5368 	case SPageInfo::EPtInfo:
       
  5369 	case SPageInfo::EInvalid:
       
  5370 	case SPageInfo::EFixed:
       
  5371 	case SPageInfo::EShadow:
       
  5372 		// These page types cannot be moved (or don't need to be moved)
       
  5373 		r = KErrNotSupported;
       
  5374 		__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: PT etc r%d",r));
       
  5375 		break;
       
  5376 
       
  5377 	default:
       
  5378 		// Unknown page type
       
  5379 		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: unknown page type %d",pi->Type()));
       
  5380 		Panic(EDefragUnknownPageType);
       
  5381 		}
       
  5382 
       
  5383 fail:
       
  5384 	NKern::UnlockSystem();
       
  5385 released:
       
  5386 	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() returns %d",r));
       
  5387 	return r;
       
  5388 	}
       
  5389 
       
  5390 
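/*
 * Attempt to discard the page at aAddr: if it belongs to the RAM cache / demand
 * paging cache and is discardable, it is discarded and returned to the RAM
 * allocator. Returns KErrNone on success, KErrNoMemory if aBlockRest is set and
 * the discard failed, and KErrInUse otherwise.
 */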
       
  5391 TInt MmuBase::DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest)
       
  5392 	{
       
  5393 	TInt r = KErrInUse;
       
  5394 	NKern::LockSystem();
       
  5395 	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
       
  5396 	if (pageInfo != NULL)
       
  5397 		{// Allocatable page at this address so is it a discardable one?
       
  5398 		if (iRamCache->IsPageDiscardable(*pageInfo))
       
  5399 			{
       
  5400 			// Discard this page and return it to the ram allocator
       
  5401 			if (!iRamCache->DoDiscardPage(*pageInfo, aBlockZoneId, aBlockRest))
       
  5402 				{// Couldn't discard the page.
       
  5403 				if (aBlockRest)
       
  5404 					{
       
  5405 					__KTRACE_OPT(KMMU, Kern::Printf("ClearDiscardableFromZone: page discard fail addr %x", aAddr));
       
  5406 					NKern::UnlockSystem();
       
  5407 					return KErrNoMemory;
       
  5408 					}
       
  5409 				}
       
  5410 			else
       
  5411 				{// Page discarded successfully.
       
  5412 				r = KErrNone;
       
  5413 				}
       
  5414 			}
       
  5415 		}
       
  5416 	NKern::UnlockSystem();
       
  5417 	return r;
       
  5418 	}
       
  5419 
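// Return the RAM cache's count of free demand-paging pages, or zero if there is
// no RAM cache.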
       
  5420 TUint MmuBase::NumberOfFreeDpPages()
       
  5421 	{
       
  5422 	TUint free = 0;
       
  5423 	if(iRamCache)
       
  5424 		{
       
  5425 		free = iRamCache->NumberOfFreePages();
       
  5426 		}
       
  5427 	return free;
       
  5428 	}
       
  5429 
       
  5430 
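/*
 * Move the physical page at aOld, returning its new physical address in aNew
 * (KPhysAddrInvalid on failure). Only ERamDefragPage_Physical is supported here;
 * other aPageToMove values return KErrNotSupported. The RAM allocator mutex is
 * taken around the move.
 */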
       
  5431 EXPORT_C TInt Epoc::MovePhysicalPage(TPhysAddr aOld, TPhysAddr& aNew, TRamDefragPageToMove aPageToMove)
       
  5432 	{
       
  5433 	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::MovePhysicalPage");
       
  5434 	__KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() old=%08x pageToMove=%d",aOld,aPageToMove));
       
  5435 
       
  5436 	switch(aPageToMove)
       
  5437 		{
       
  5438 		case ERamDefragPage_Physical:
       
  5439 			break;
       
  5440 		default:
       
  5441 			return KErrNotSupported;
       
  5442 		}
       
  5443 
       
  5444 	MmuBase::Wait();
       
  5445 	TInt r=M::MovePage(aOld,aNew,KRamZoneInvalidId,EFalse);
       
  5446 	if (r!=KErrNone)
       
  5447 		aNew = KPhysAddrInvalid;
       
  5448 	MmuBase::Signal();
       
  5449 	__KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() returns %d",r));
       
  5450 	return r;
       
  5451 	}
       
  5452 
       
  5453 
       
  5454 TInt M::RamDefragFault(TAny* aExceptionInfo)
       
  5455 	{
       
  5456 	// If the mmu has been initialised then let it try processing the fault.
       
  5457 	if(MmuBase::TheMmu)
       
  5458 		return MmuBase::TheMmu->RamDefragFault(aExceptionInfo);
       
  5459 	return KErrAbort;
       
  5460 	}
       
  5461 
       
  5462 
       
  5463 void M::RamZoneClaimed(SZone* aZone)
       
  5464 	{
       
  5465 	// Lock each page.  OK to traverse SPageInfo array as we know no unknown
       
  5466 	// pages are in the zone.
       
  5467 	SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aZone->iPhysBase);
       
  5468 	SPageInfo* pageInfoEnd = pageInfo + aZone->iPhysPages;
       
  5469 	for (; pageInfo < pageInfoEnd; ++pageInfo)
       
  5470 		{
       
  5471 		NKern::LockSystem();
       
  5472 		__NK_ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EUnused);
       
  5473 		pageInfo->Lock();
       
  5474 		NKern::UnlockSystem();
       
  5475 		}
       
  5476 	// For the sake of platform security we have to clear the memory. E.g. the driver
       
  5477 	// could assign it to a chunk visible to user side.  Set LSB so ClearPages
       
  5478 	// knows this is a contiguous memory region.
       
  5479 	Mmu::Get().ClearPages(aZone->iPhysPages, (TPhysAddr*)(aZone->iPhysBase|1));
       
  5480 	}