kernel/eka/memmodel/epoc/multiple/arm/xmmu.cpp
       
     1 // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 // e32\memmodel\epoc\multiple\arm\xmmu.cpp
       
    15 // 
       
    16 //
       
    17 
       
    18 #include "arm_mem.h"
       
    19 #include <mmubase.inl>
       
    20 #include <ramcache.h>
       
    21 #include <demand_paging.h>
       
    22 #include "execs.h"
       
    23 #include <defrag.h>
       
    24 #include "cache_maintenance.inl"
       
    25 
       
    26 #undef __MMU_MACHINE_CODED__
       
    27 
       
    28 // SECTION_PDE(perm, attr, domain, execute, global)
       
    29 // PT_PDE(domain)
       
    30 // LP_PTE(perm, attr, execute, global)
       
    31 // SP_PTE(perm, attr, execute, global)
       
    32 
       
    33 const TInt KPageColourShift=2;
       
    34 const TInt KPageColourCount=(1<<KPageColourShift);
       
    35 const TInt KPageColourMask=KPageColourCount-1;
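
// Note on page colouring: with KPageColourShift==2 there are four page colours and a
// page's colour is (addr>>KPageShift)&KPageColourMask, i.e. address bits 13:12. Mappings
// of the same physical page are kept at the same colour so the VIPT L1 caches cannot alias.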
       
    36 
       
    37 
       
    38 const TPde KPdPdePerm=PT_PDE(0);
       
    39 const TPde KPtPdePerm=PT_PDE(0);
       
    40 const TPde KShadowPdePerm=PT_PDE(0);
       
    41 
       
    42 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
       
    43 // ARM1176, ARM11MPCore, ARMv7 and later
       
     44 // __CPU_MEMORY_TYPE_REMAPPING means that only three bits (TEX0:C:B) in a page table entry define
        
     45 // memory attributes. The kernel runs with a limited set of memory types: strongly ordered,
        
     46 // device, normal un-cached and normal WBWA. Due to the lack of a write-through mode, page tables are
        
     47 // write-back, which means that the cache has to be cleaned on every page/directory table update.
       
    48 const TPte KPdPtePerm=				SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
       
    49 const TPte KPtPtePerm=				SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
       
    50 const TPte KPtInfoPtePerm=			SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
       
    51 const TPte KRomPtePerm=				SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
       
    52 const TPte KShadowPtePerm=			SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
       
    53 const TPde KRomSectionPermissions=	SECTION_PDE(KArmV6PermRORO, EMemAttNormalCached, 0, 1, 1);
       
    54 const TPte KUserCodeLoadPte=		SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 0);
       
    55 const TPte KUserCodeRunPte=			SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0);
       
    56 const TPte KGlobalCodeRunPte=		SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
       
    57 const TPte KKernelCodeRunPte=		SP_PTE(KArmV6PermRONO, EMemAttNormalCached, 1, 1);
       
    58 
       
    59 const TInt KNormalUncachedAttr = EMemAttNormalUncached;
       
    60 const TInt KNormalCachedAttr = EMemAttNormalCached;
       
    61 
       
    62 #else
       
    63 
       
    64 //ARM1136 
       
    65 const TPte KPtInfoPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
       
    66 #if defined (__CPU_WriteThroughDisabled)
       
    67 const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
       
    68 const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
       
    69 const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 1, 1);
       
    70 const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
       
    71 const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 0, 1, 1);
       
    72 const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 0);
       
    73 const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0);
       
    74 const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
       
    75 const TInt KKernelCodeRunPteAttr = KArmV6MemAttWBWAWBWA;
       
    76 #else
       
    77 const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
       
    78 const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
       
    79 const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 1, 1);
       
    80 const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
       
    81 const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 0, 1, 1);
       
    82 const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 0);
       
    83 const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0);
       
    84 const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
       
    85 const TInt KKernelCodeRunPteAttr = KArmV6MemAttWTRAWTRA;
       
    86 #endif
       
    87 
       
    88 
       
    89 #if defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
    90 const TInt KKernelCodeRunPtePerm = KArmV6PermRONO;
       
    91 #else
       
    92 const TInt KKernelCodeRunPtePerm = KArmV6PermRORO;
       
    93 #endif
       
    94 const TPte KKernelCodeRunPte=SP_PTE(KKernelCodeRunPtePerm, KKernelCodeRunPteAttr, 1, 1);
       
    95 
       
    96 const TInt KNormalUncachedAttr = KArmV6MemAttNCNC;
       
    97 const TInt KNormalCachedAttr = KArmV6MemAttWBWAWBWA;
       
    98 
       
    99 #endif
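
// Note: the *CodeLoadPte constants above map code pages supervisor-writable so the loader
// can fill them; the corresponding *CodeRunPte values are applied once loading completes,
// leaving running code read-only (and user-accessible where appropriate).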
       
   100 
       
   101 
       
   102 extern void __FlushBtb();
       
   103 
       
   104 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
   105 extern void remove_and_invalidate_page(TPte* aPte, TLinAddr aAddr, TInt aAsid);
       
   106 extern void remove_and_invalidate_section(TPde* aPde, TLinAddr aAddr, TInt aAsid);
       
   107 #endif
       
   108 
       
   109 
       
   110 LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
       
   111 	{
       
   112 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
       
    113 // ARM1176, ARM11 MPCore, ARMv7 and later
       
   114 	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// EKernelData
       
   115 	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// EKernelStack
       
   116 	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1),		// EKernelCode - loading
       
   117 	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1),		// EDll (used for global code) - loading
       
   118 	SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0),		// EUserCode - run
       
   119 	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 1),		// ERamDrive
       
   120 	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// EUserData
       
   121 	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// EDllData
       
   122 	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 1, 0),		// EUserSelfModCode
       
   123 	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// ESharedKernelSingle
       
   124 	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// ESharedKernelMultiple
       
   125 	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// ESharedIo
       
   126 	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// ESharedKernelMirror
       
   127 	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// EKernelMessage
       
   128 #else
       
   129 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// EKernelData
       
   130 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// EKernelStack
       
   131 #if defined (__CPU_WriteThroughDisabled)
       
   132 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1),		// EKernelCode - loading
       
   133 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1),		// EDll (used for global code) - loading
       
   134 	SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0),		// EUserCode - run
       
   135 #else
       
   136 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1),		// EKernelCode - loading
       
   137 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1),		// EDll (used for global code) - loading
       
   138 	SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0),		// EUserCode - run
       
   139 #endif
       
   140 	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 1),		// ERamDrive
       
   141 	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// EUserData
       
   142 	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// EDllData
       
   143 	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 1, 0),		// EUserSelfModCode
       
   144 	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// ESharedKernelSingle
       
   145 	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// ESharedKernelMultiple
       
   146 	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// ESharedIo
       
   147 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// ESharedKernelMirror
       
   148 	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// EKernelMessage
       
   149 #endif
       
   150 	};
       
   151 
       
   152 #ifdef __USER_MEMORY_GUARDS_ENABLED__
       
   153 #define USER_DOMAIN 15
       
   154 #else
       
   155 #define USER_DOMAIN 0
       
   156 #endif
       
   157 
       
   158 LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
       
   159 	{
       
   160 	PT_PDE(0),				// EKernelData
       
   161 	PT_PDE(0),				// EKernelStack
       
   162 	PT_PDE(0),				// EKernelCode
       
   163 	PT_PDE(0),				// EDll
       
   164 	PT_PDE(USER_DOMAIN),	// EUserCode
       
   165 	PT_PDE(1),				// ERamDrive
       
   166 	PT_PDE(USER_DOMAIN),	// EUserData
       
   167 	PT_PDE(USER_DOMAIN),	// EDllData
       
   168 	PT_PDE(USER_DOMAIN),	// EUserSelfModCode
       
   169 	PT_PDE(USER_DOMAIN),	// ESharedKernelSingle
       
   170 	PT_PDE(USER_DOMAIN),	// ESharedKernelMultiple
       
   171 	PT_PDE(0),				// ESharedIo
       
   172 	PT_PDE(0),				// ESharedKernelMirror
       
   173 	PT_PDE(0),				// EKernelMessage
       
   174 	};
       
   175 
       
   176 // Inline functions for simple transformations
       
   177 inline TLinAddr PageTableLinAddr(TInt aId)
       
   178 	{
       
   179 	return (KPageTableBase+(aId<<KPageTableShift));
       
   180 	}
       
   181 
       
   182 inline TPte* PageTable(TInt aId)
       
   183 	{
       
   184 	return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
       
   185 	}
       
   186 
       
   187 inline TPte* PageTableEntry(TInt aId, TLinAddr aAddress)
       
   188 	{
       
   189 	return PageTable(aId) + ((aAddress >> KPageShift) & (KChunkMask >> KPageShift));
       
   190 	}
       
   191 
       
   192 inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
       
   193 	{
       
   194 	return (KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
       
   195 	}
       
   196 
       
   197 inline TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
       
   198 	{
       
   199 	return PageDirectory(aOsAsid) + (aAddress >> KChunkShift);
       
   200 	}
       
   201 
       
   202 extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/);
       
   203 extern void FlushTLBs();
       
   204 extern TUint32 TTCR();
       
   205 
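
// Return the linear address of the page table referenced by the page-table PDE aPde, or
// NULL if aPde is not a page-table PDE or the referenced page has no SPageInfo (i.e. is
// not managed RAM). Safe to call on arbitrary PDE values.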
       
   206 TPte* SafePageTableFromPde(TPde aPde)
       
   207 	{
       
   208 	if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
       
   209 		{
       
   210 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
       
   211 		if(pi)
       
   212 			{
       
   213 			TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
       
   214 			return PageTable(id);
       
   215 			}
       
   216 		}
       
   217 	return 0;
       
   218 	}
       
   219 
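
// Return a pointer to the PTE mapping aAddress in address space aOsAsid, or NULL if no
// page table is mapped there. Addresses beyond the process-local page directory are looked
// up in the global (ASID 0) page directory instead.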
       
   220 TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
       
   221 	{
       
   222 	if ((TInt)(aAddress>>KChunkShift)>=(TheMmu.iLocalPdSize>>2))
       
   223 		aOsAsid = 0;
       
   224 	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
       
   225 	TPte* pt = SafePageTableFromPde(pde);
       
   226 	if(pt)
       
   227 		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
       
   228 	return pt;
       
   229 	}
       
   230 
       
   231 #ifndef _DEBUG
       
   232 // inline in UREL builds...
       
   233 #ifdef __ARMCC__
       
   234 	__forceinline /* RVCT ignores normal inline qualifier :-( */
       
   235 #else
       
   236 	inline
       
   237 #endif
       
   238 #endif
       
   239 TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
       
   240 	{
       
   241 	// this function only works for process local memory addresses, or for kernel memory (asid==0).
       
   242 	__NK_ASSERT_DEBUG(aOsAsid==0 || (TInt)(aAddress>>KChunkShift)<(TheMmu.iLocalPdSize>>2));
       
   243 	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
       
   244 	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
       
   245 	TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   246 	TPte* pt = PageTable(id);
       
   247 	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
       
   248 	return pt;
       
   249 	}
       
   250 
       
   251 
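
// Translate the range [aLinAddr, aLinAddr+aSize) for address space aOsAsid. If
// aPhysicalPageList is supplied it receives one physical address per page. Returns KErrNone
// (with aPhysicalAddress set) if the range is physically contiguous, 1 with
// aPhysicalAddress==KPhysAddrInvalid if it is not, and KErrNotFound if any page is unmapped.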
       
   252 TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
       
   253 	{
       
   254 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
       
   255 	TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr,aOsAsid);
       
   256 	TPhysAddr nextPhys = physStart&~KPageMask;
       
   257 
       
   258 	TUint32* pageList = aPhysicalPageList;
       
   259 
       
   260 	TInt pageIndex = aLinAddr>>KPageShift;
       
   261 	TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
       
   262 	TInt pdeIndex = aLinAddr>>KChunkShift;
       
   263 	TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1))
       
   264 					? PageDirectory(aOsAsid)
       
   265 					: ::InitPageDirectory;
       
   266 	pdePtr += pdeIndex;
       
   267 	while(pagesLeft)
       
   268 		{
       
   269 		pageIndex &= KChunkMask>>KPageShift;
       
   270 		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
       
   271 		if(pagesLeftInChunk>pagesLeft)
       
   272 			pagesLeftInChunk = pagesLeft;
       
   273 		pagesLeft -= pagesLeftInChunk;
       
   274 
       
   275 		TPhysAddr phys;
       
   276 		TPde pde = *pdePtr++;
       
   277 		TUint pdeType = pde&KPdeTypeMask;
       
   278 		if(pdeType==KArmV6PdeSection)
       
   279 			{
       
   280 			phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
       
   281 			__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
       
   282 			TInt n=pagesLeftInChunk;
       
   283 			phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
       
   284 			if(pageList)
       
   285 				{
       
   286 				TUint32* pageEnd = pageList+n;
       
   287 				do
       
   288 					{
       
   289 					*pageList++ = phys;
       
   290 					phys+=KPageSize;
       
   291 					}
       
   292 				while(pageList<pageEnd);
       
   293 				}
       
   294 			}
       
   295 		else
       
   296 			{
       
   297 			TPte* pt = SafePageTableFromPde(pde);
       
   298 			if(!pt)
       
   299 				{
       
   300 				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
       
   301 				return KErrNotFound;
       
   302 				}
       
   303 			pt += pageIndex;
       
   304 			for(;;)
       
   305 				{
       
   306 				TPte pte = *pt++;
       
   307 				TUint pte_type = pte & KPteTypeMask;
       
   308 				if (pte_type >= KArmV6PteSmallPage)
       
   309 					{
       
   310 					phys = (pte & KPteSmallPageAddrMask);
       
   311 					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
       
   312 					phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
       
   313 					if(pageList)
       
   314 						*pageList++ = phys;
       
   315 					if(--pagesLeftInChunk)
       
   316 						continue;
       
   317 					break;
       
   318 					}
       
   319 				if (pte_type == KArmV6PteLargePage)
       
   320 					{
       
   321 					--pt; // back up ptr
       
   322 					TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
       
   323 					phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
       
   324 					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
       
   325 					TInt n=KLargeSmallPageRatio-pageOffset;
       
   326 					if(n>pagesLeftInChunk)
       
   327 						n = pagesLeftInChunk;
       
   328 					phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
       
   329 					if(pageList)
       
   330 						{
       
   331 						TUint32* pageEnd = pageList+n;
       
   332 						do
       
   333 							{
       
   334 							*pageList++ = phys;
       
   335 							phys+=KPageSize;
       
   336 							}
       
   337 						while(pageList<pageEnd);
       
   338 						}
       
   339 					pt += n;
       
   340 					if(pagesLeftInChunk-=n)
       
   341 						continue;
       
   342 					break;
       
   343 					}
       
   344 				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
       
   345 				return KErrNotFound;
       
   346 				}
       
   347 			}
       
   348 		if(!pageList && nextPhys==KPhysAddrInvalid)
       
   349 			{
       
   350 			__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
       
   351 			return KErrNotFound;
       
   352 			}
       
   353 		pageIndex = 0;
       
   354 		}
       
   355 
       
   356 	if(nextPhys==KPhysAddrInvalid)
       
   357 		{
       
   358 		// Memory is discontiguous...
       
   359 		aPhysicalAddress = KPhysAddrInvalid;
       
   360 		return 1;
       
   361 		}
       
   362 	else
       
   363 		{
       
   364 		// Memory is contiguous...
       
   365 		aPhysicalAddress = physStart;
       
   366 		return KErrNone;
       
   367 		}
       
   368 	}
       
   369 
       
   370 TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TInt aOsAsid, TPhysAddr* aPhysicalPageList)
       
   371 //Returns the list of physical pages belonging to the specified memory space.
       
    372 //Checks that these pages belong to a chunk marked as trusted.
        
    373 //Locks these pages so they cannot be moved by e.g. RAM defragmentation.
       
   374 	{
       
   375 	SPageInfo* pi = NULL;
       
   376 	DChunk* chunk = NULL;
       
   377 	TInt err = KErrNone;
       
   378 	
       
   379 	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
       
   380 
       
   381 	TUint32* pageList = aPhysicalPageList;
       
   382 	TInt pagesInList = 0;				//The number of pages we put in the list so far
       
   383 	
       
   384 	TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift;	// Index of the page within the section
       
   385 	TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
       
   386 
       
   387 	TInt pdeIndex = aLinAddr>>KChunkShift;
       
   388 
       
   389 
       
   390 	MmuBase::Wait(); 	// RamAlloc Mutex for accessing page/directory tables.
       
    391 	NKern::LockSystem();	// System lock for accessing SPageInfo objects.
       
   392 
       
   393 	TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid) : ::InitPageDirectory;
       
   394 	pdePtr += pdeIndex;//This points to the first pde 
       
   395 
       
   396 	while(pagesLeft)
       
   397 		{
       
   398 		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
       
   399 		if(pagesLeftInChunk>pagesLeft)
       
   400 			pagesLeftInChunk = pagesLeft;
       
   401 		
       
   402 		pagesLeft -= pagesLeftInChunk;
       
   403 
       
   404 		TPte* pt = SafePageTableFromPde(*pdePtr++);
       
   405 		if(!pt) { err = KErrNotFound; goto fail; }// Cannot get page table.
       
   406 		
       
   407 		pt += pageIndex;
       
   408 
       
   409 		for(;pagesLeftInChunk--;)
       
   410 			{
       
   411 			TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
       
   412 			pi =  SPageInfo::SafeFromPhysAddr(phys);
       
   413 			if(!pi)	{ err = KErrNotFound; goto fail; }// Invalid address
       
   414 			
       
   415 			__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
       
   416 			if (chunk==NULL)
       
   417 				{//This is the first page. Check 'trusted' bit.
       
   418 				if (pi->Type()!= SPageInfo::EChunk)
       
    419 					{ err = KErrAccessDenied; goto fail; }// The first page does not belong to a chunk.
       
   420 
       
   421 				chunk = (DChunk*)pi->Owner();
       
   422 				if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
       
   423 					{ err = KErrAccessDenied; goto fail; }// Not a trusted chunk
       
   424 				}
       
   425 			pi->Lock();
       
   426 
       
   427 			*pageList++ = phys;
       
   428 			if ( (++pagesInList&127) == 0) //release system lock temporarily on every 512K
       
   429 				NKern::FlashSystem();
       
   430 			}
       
   431 		pageIndex = 0;
       
   432 		}
       
   433 
       
   434 	if (pi->Type()!= SPageInfo::EChunk)
       
    435 		{ err = KErrAccessDenied; goto fail; }// The last page does not belong to a chunk.
       
   436 
       
   437 	if (chunk && (chunk != (DChunk*)pi->Owner()))
       
    438 		{ err = KErrArgument; goto fail; }// The first and the last page do not belong to the same chunk.
       
   439 
       
   440 	NKern::UnlockSystem();
       
   441 	MmuBase::Signal();
       
   442 	return KErrNone;
       
   443 
       
   444 fail:
       
   445 	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
       
   446 	NKern::UnlockSystem();
       
   447 	MmuBase::Signal();
       
   448 	ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
       
   449 	return err;
       
   450 	}
       
   451 
       
   452 TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
       
   453 // Unlocks physical pages.
       
   454 // @param aPhysicalPageList - points to the list of physical pages that should be released.
       
   455 // @param aPageCount		- the number of physical pages in the list.
       
   456 	{
       
   457 	NKern::LockSystem();
       
   458 	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));
       
   459 
       
   460 	while (aPageCount--)
       
   461 		{
       
   462 		SPageInfo* pi =  SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
       
   463 		if(!pi)
       
   464 			{
       
   465 			NKern::UnlockSystem();
       
   466 			return KErrArgument;
       
   467 			}
       
   468 		__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
       
   469 		pi->Unlock();
       
   470 		}
       
   471 	NKern::UnlockSystem();
       
   472 	return KErrNone;
       
   473 	}
       
   474 
       
   475 TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
       
   476 //
       
   477 // Find the physical address corresponding to a given linear address in a specified OS
       
   478 // address space. Call with system locked.
       
   479 //
       
   480 	{
       
   481 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
       
   482 	TInt pdeIndex=aLinAddr>>KChunkShift;
       
   483 	TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
       
   484 	TPhysAddr pa=KPhysAddrInvalid;
       
   485 	if ((pde&KPdePresentMask)==KArmV6PdePageTable)
       
   486 		{
       
   487 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
       
   488 		if (pi)
       
   489 			{
       
   490 			TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   491 			TPte* pPte=PageTable(id);
       
   492 			TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
       
   493 			if (pte & KArmV6PteSmallPage)
       
   494 				{
       
   495 				pa=(pte&KPteSmallPageAddrMask)+(aLinAddr&~KPteSmallPageAddrMask);
       
   496 				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
       
   497 				}
       
   498 			else if ((pte & KArmV6PteTypeMask) == KArmV6PteLargePage)
       
   499 				{
       
   500 				pa=(pte&KPteLargePageAddrMask)+(aLinAddr&~KPteLargePageAddrMask);
       
   501 				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
       
   502 				}
       
   503 			}
       
   504 		}
       
   505 	else if ((pde&KPdePresentMask)==KArmV6PdeSection)
       
   506 		{
       
   507 		pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
       
   508 		__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
       
   509 		}
       
   510 	return pa;
       
   511 	}
       
   512 
       
   513 // permission table indexed by XN:APX:AP1:AP0
       
   514 static const TInt PermissionLookup[16]=
       
   515 	{													//XN:APX:AP1:AP0
       
   516 	0,													//0   0   0   0  no access
       
   517 	EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup,	//0   0   0   1  RW sup			execute
       
   518 	EMapAttrWriteSup|EMapAttrReadUser|EMapAttrExecUser,	//0   0   1   0  supRW usrR		execute
       
   519 	EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser,//0   0   1   1  supRW usrRW	execute
       
   520 	0,													//0   1   0   0  reserved
       
   521 	EMapAttrReadSup|EMapAttrExecSup,					//0   1   0   1  supR			execute
       
   522 	EMapAttrReadUser|EMapAttrExecUser,					//0   1   1   0  supR usrR		execute
       
   523 	0,													//0   1   1   1  reserved
       
   524 	0,													//1   0   0   0  no access
       
   525 	EMapAttrWriteSup|EMapAttrReadSup,					//1   0   0   1  RW sup
       
   526 	EMapAttrWriteSup|EMapAttrReadUser,					//1   0   1   0  supRW usrR
       
   527 	EMapAttrWriteUser|EMapAttrReadUser,					//1   0   1   1  supRW usrRW
       
   528 	0,													//1   1   0   0  reserved
       
   529 	EMapAttrReadSup,									//1   1   0   1  supR
       
   530 	EMapAttrReadUser,									//1   1   1   0  supR usrR
       
   531 	EMapAttrReadUser,									//1   1   1   1  supR usrR
       
   532 	};
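
// For an ARMv6/v7 extended small-page PTE the index above can be assembled from the
// descriptor's execute-never and access-permission bits, roughly (bit positions assumed
// from the ARMv6 small page format):
//   TInt index = ((pte & 1) << 3) | (((pte >> 9) & 1) << 2) | ((pte >> 4) & 3);  // XN:APX:AP1:AP0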
       
   533 
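
// Return the page table ID of the page table mapping aAddr in address space aOsAsid,
// or -1 if no page table is mapped at that address.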
       
   534 TInt ArmMmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
       
   535 	{
       
   536 	TInt id=-1;
       
   537 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
       
   538 	TInt pdeIndex=aAddr>>KChunkShift;
       
   539 	TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
       
   540 	if ((pde&KArmV6PdeTypeMask)==KArmV6PdePageTable)
       
   541 		{
       
   542 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
       
   543 		if (pi)
       
   544 			id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   545 		}
       
   546 	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
       
   547 	return id;
       
   548 	}
       
   549 
       
   550 // Used only during boot for recovery of RAM drive
       
   551 TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
       
   552 	{
       
   553 	TInt id=KErrNotFound;
       
   554 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
       
   555 	TPde* kpd=(TPde*)KPageDirectoryBase;	// kernel page directory
       
   556 	TInt pdeIndex=aAddr>>KChunkShift;
       
   557 	TPde pde = kpd[pdeIndex];
       
   558 	if ((pde & KArmV6PdeTypeMask) == KArmV6PdePageTable)
       
   559 		{
       
   560 		aPtPhys = pde & KPdePageTableAddrMask;
       
   561 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
       
   562 		if (pi)
       
   563 			{
       
   564 			SPageInfo::TType type = pi->Type();
       
   565 			if (type == SPageInfo::EPageTable)
       
   566 				id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
       
   567 			else if (type == SPageInfo::EUnused)
       
   568 				id = KErrUnknown;
       
   569 			}
       
   570 		}
       
   571 	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
       
   572 	return id;
       
   573 	}
       
   574 
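
// PTE/PDE decode helpers, used (among other things) by the generic MMU layer during the
// boot-time RAM drive recovery above: test whether a PTE maps anything and recover the
// physical address from small/large page PTEs and from section PDEs.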
       
   575 TBool ArmMmu::PteIsPresent(TPte aPte)
       
   576 	{
       
   577 	return aPte & KArmV6PteTypeMask;
       
   578 	}
       
   579 
       
   580 TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
       
   581 	{
       
   582 	TUint32 pte_type = aPte & KArmV6PteTypeMask;
       
   583 	if (pte_type == KArmV6PteLargePage)
       
   584 		return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
       
   585 	else if (pte_type != 0)
       
   586 		return aPte & KPteSmallPageAddrMask;
       
   587 	return KPhysAddrInvalid;
       
   588 	}
       
   589 
       
   590 TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
       
   591 	{
       
   592 	TPde* kpd = (TPde*)KPageDirectoryBase;	// kernel page directory
       
   593 	TPde pde = kpd[aAddr>>KChunkShift];
       
   594 	if ((pde & KPdePresentMask) == KArmV6PdeSection)
       
   595 		return pde & KPdeSectionAddrMask;
       
   596 	return KPhysAddrInvalid;
       
   597 	}
       
   598 
       
   599 void ArmMmu::Init1()
       
   600 	{
       
   601 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));
       
   602 
       
   603 	// MmuBase data
       
   604 	iPageSize=KPageSize;
       
   605 	iPageMask=KPageMask;
       
   606 	iPageShift=KPageShift;
       
   607 	iChunkSize=KChunkSize;
       
   608 	iChunkMask=KChunkMask;
       
   609 	iChunkShift=KChunkShift;
       
   610 	iPageTableSize=KPageTableSize;
       
   611 	iPageTableMask=KPageTableMask;
       
   612 	iPageTableShift=KPageTableShift;
       
   613 	iPtClusterSize=KPtClusterSize;
       
   614 	iPtClusterMask=KPtClusterMask;
       
   615 	iPtClusterShift=KPtClusterShift;
       
   616 	iPtBlockSize=KPtBlockSize;
       
   617 	iPtBlockMask=KPtBlockMask;
       
   618 	iPtBlockShift=KPtBlockShift;
       
   619 	iPtGroupSize=KChunkSize/KPageTableSize;
       
   620 	iPtGroupMask=iPtGroupSize-1;
       
   621 	iPtGroupShift=iChunkShift-iPageTableShift;
       
   622 	//TInt* iPtBlockCount;		// dynamically allocated - Init2
       
   623 	//TInt* iPtGroupCount;		// dynamically allocated - Init2
       
   624 	iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
       
   625 	iPageTableLinBase=KPageTableBase;
       
   626 	//iRamPageAllocator;		// dynamically allocated - Init2
       
   627 	//iAsyncFreeList;			// dynamically allocated - Init2
       
   628 	//iPageTableAllocator;		// dynamically allocated - Init2
       
   629 	//iPageTableLinearAllocator;// dynamically allocated - Init2
       
   630 	iPtInfoPtePerm=KPtInfoPtePerm;
       
   631 	iPtPtePerm=KPtPtePerm;
       
   632 	iPtPdePerm=KPtPdePerm;
       
   633 	iUserCodeLoadPtePerm=KUserCodeLoadPte;
       
   634 	iKernelCodePtePerm=KKernelCodeRunPte;
       
   635 	iTempAddr=KTempAddr;
       
   636 	iSecondTempAddr=KSecondTempAddr;
       
   637 	iMapSizes=KPageSize|KLargePageSize|KChunkSize;
       
   638 	iRomLinearBase = ::RomHeaderAddress;
       
   639 	iRomLinearEnd = KRomLinearEnd;
       
   640 	iShadowPtePerm = KShadowPtePerm;
       
   641 	iShadowPdePerm = KShadowPdePerm;
       
   642 
       
   643 	// Mmu data
       
   644 	TInt total_ram=TheSuperPage().iTotalRamSize;
       
   645 
       
   646 	// Large or small configuration?
       
   647 	// This is determined by the bootstrap based on RAM size
       
   648 	TUint32 ttcr=TTCR();
       
   649 	__NK_ASSERT_ALWAYS(ttcr==1 || ttcr==2);
       
   650 	TBool large = (ttcr==1);
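
	// ttcr==1 selects the 'large' layout: the per-process (local) page directory covers the
	// bottom 2GB (half a full page directory); ttcr==2 selects the 'small' layout covering
	// the bottom 1GB (a quarter). This matches the iLocalPdSize/iUserSharedBase setup below.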
       
   651 
       
   652 	// calculate cache colouring...
       
   653 	TInt iColourCount = 0;
       
   654 	TInt dColourCount = 0;
       
   655 	TUint32 ctr = InternalCache::TypeRegister();
       
   656 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
       
   657 #ifdef __CPU_ARMV6
       
   658 	__NK_ASSERT_ALWAYS((ctr>>29)==0);	// check ARMv6 format
       
   659 	if(ctr&0x800)
       
   660 		iColourCount = 4;
       
   661 	if(ctr&0x800000)
       
   662 		dColourCount = 4;
       
   663 #else
       
   664 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
       
   665 	__NK_ASSERT_ALWAYS((ctr>>29)==4);	// check ARMv7 format
       
   666 	TUint l1ip = (ctr>>14)&3;			// L1 instruction cache indexing and tagging policy
       
   667 	__NK_ASSERT_ALWAYS(l1ip>=2);		// check I cache is physically tagged
       
   668 
       
   669 	TUint32 clidr = InternalCache::LevelIDRegister();
       
   670 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheLevelIDRegister = %08x",clidr));
       
   671 	TUint l1type = clidr&7;
       
   672 	if(l1type)
       
   673 		{
       
   674 		if(l1type==2 || l1type==3 || l1type==4)
       
   675 			{
       
   676 			// we have an L1 data cache...
       
   677 			TUint32 csir = InternalCache::SizeIdRegister(0,0);
       
   678 			TUint sets = ((csir>>13)&0x7fff)+1;
       
   679 			TUint ways = ((csir>>3)&0x3ff)+1;
       
   680 			TUint lineSizeShift = (csir&7)+4;
       
    681 			// assume the L1 data cache is VIPT with broken alias checks, so we need data cache colouring...
       
   682 			dColourCount = (sets<<lineSizeShift)>>KPageShift;
       
   683 			if(l1type==4) // unified cache, so set instruction cache colour as well...
       
   684 				iColourCount = (sets<<lineSizeShift)>>KPageShift;
       
   685 			__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
       
   686 			}
       
   687 
       
   688 		if(l1type==1 || l1type==3)
       
   689 			{
       
   690 			// we have a separate L1 instruction cache...
       
   691 			TUint32 csir = InternalCache::SizeIdRegister(1,0);
       
   692 			TUint sets = ((csir>>13)&0x7fff)+1;
       
   693 			TUint ways = ((csir>>3)&0x3ff)+1;
       
   694 			TUint lineSizeShift = (csir&7)+4;
       
   695 			iColourCount = (sets<<lineSizeShift)>>KPageShift;
       
   696 			__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
       
   697 			}
       
   698 		}
       
   699 	if(l1ip==3)
       
   700 		{
       
   701 		// PIPT cache, so no colouring restrictions...
       
   702 		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is PIPT"));
       
   703 		iColourCount = 0;
       
   704 		}
       
   705 	else
       
   706 		{
       
   707 		// VIPT cache...
       
   708 		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is VIPT"));
       
   709 		}
       
   710 #endif
       
   711 	TUint colourShift = 0;
       
   712 	for(TUint colourCount=Max(iColourCount,dColourCount); colourCount!=0; colourCount>>=1)
       
   713 		++colourShift;
       
   714 	iAliasSize=KPageSize<<colourShift;
       
   715 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iAliasSize=0x%x",iAliasSize));
       
   716 	iAliasMask=iAliasSize-1;
       
   717 	iAliasShift=KPageShift+colourShift;
       
   718 
       
   719 	iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();
       
   720 
       
   721 	iNumOsAsids=KArmV6NumAsids;
       
   722 	iNumGlobalPageDirs=1;
       
   723 	//iOsAsidAllocator;			// dynamically allocated - Init2
       
   724 	iGlobalPdSize=KPageDirectorySize;
       
   725 	iGlobalPdShift=KPageDirectoryShift;
       
   726 	iAsidGroupSize=KChunkSize/KPageDirectorySize;
       
   727 	iAsidGroupMask=iAsidGroupSize-1;
       
   728 	iAsidGroupShift=KChunkShift-KPageDirectoryShift;
       
   729 	iUserLocalBase=KUserLocalDataBase;
       
   730 	iAsidInfo=(TUint32*)KAsidInfoBase;
       
   731 	iPdeBase=KPageDirectoryBase;
       
   732 	iPdPtePerm=KPdPtePerm;
       
   733 	iPdPdePerm=KPdPdePerm;
       
   734 	iRamDriveMask=0x00f00000;
       
   735 	iGlobalCodePtePerm=KGlobalCodeRunPte;
       
   736 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
       
   737 	iCacheMaintenanceTempMapAttr = CacheMaintenance::TemporaryMapping();
       
   738 #else
       
   739 	switch(CacheMaintenance::TemporaryMapping())
       
   740 		{
       
   741 		case EMemAttNormalUncached:
       
   742 			iCacheMaintenanceTempMapAttr = KArmV6MemAttNCNC;
       
   743 			break;
       
   744 		case EMemAttNormalCached:
       
   745 			iCacheMaintenanceTempMapAttr = KArmV6MemAttWBWAWBWA;
       
   746 			break;
       
   747 		default:
       
   748 			Panic(ETempMappingFailed);
       
   749 		}
       
   750 #endif	
       
   751 	iMaxDllDataSize=Min(total_ram/2, 0x08000000);				// phys RAM/2 up to 128Mb
       
   752 	iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;	// round up to chunk size
       
   753 	iMaxUserCodeSize=Min(total_ram, 0x10000000);				// phys RAM up to 256Mb
       
   754 	iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask;	// round up to chunk size
       
   755 	if (large)
       
   756 		{
       
   757 		iLocalPdSize=KPageDirectorySize/2;
       
   758 		iLocalPdShift=KPageDirectoryShift-1;
       
   759 		iUserSharedBase=KUserSharedDataBase2GB;
       
   760 		iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
       
   761 		iUserSharedEnd=KUserSharedDataEnd2GB-iMaxUserCodeSize;
       
   762 		iDllDataBase=iUserLocalEnd;
       
   763 		iUserCodeBase=iUserSharedEnd;
       
   764 		}
       
   765 	else
       
   766 		{
       
   767 		iLocalPdSize=KPageDirectorySize/4;
       
   768 		iLocalPdShift=KPageDirectoryShift-2;
       
   769 		iUserSharedBase=KUserSharedDataBase1GB;
       
   770 		iUserLocalEnd=iUserSharedBase;
       
   771 		iDllDataBase=KUserSharedDataEnd1GB-iMaxDllDataSize;
       
   772 		iUserCodeBase=iDllDataBase-iMaxUserCodeSize;
       
   773 		iUserSharedEnd=iUserCodeBase;
       
   774 		}
       
   775 	__KTRACE_OPT(KMMU,Kern::Printf("LPD size %08x GPD size %08x Alias size %08x",
       
   776 													iLocalPdSize, iGlobalPdSize, iAliasSize));
       
   777 	__KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
       
   778 																			iUserSharedBase,iUserSharedEnd));
       
   779 	__KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));
       
   780 
       
   781 	// ArmMmu data
       
   782 
       
   783 	// other
       
   784 	PP::MaxUserThreadStack=0x14000;			// 80K - STDLIB asks for 64K for PosixServer!!!!
       
   785 	PP::UserThreadStackGuard=0x2000;		// 8K
       
   786 	PP::MaxStackSpacePerProcess=0x200000;	// 2Mb
       
   787 	K::SupervisorThreadStackSize=0x1000;	// 4K
       
   788 	PP::SupervisorThreadStackGuard=0x1000;	// 4K
       
   789 	K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
       
   790 	PP::RamDriveStartAddress=KRamDriveStartAddress;
       
   791 	PP::RamDriveRange=KRamDriveMaxSize;
       
   792 	PP::RamDriveMaxSize=KRamDriveMaxSize;	// may be reduced later
       
   793 	K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
       
   794 						EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
       
   795 						EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
       
   796 
       
   797 	Arm::DefaultDomainAccess=KDefaultDomainAccess;
       
   798 
       
   799 	Mmu::Init1();
       
   800 	}
       
   801 
       
   802 void ArmMmu::DoInit2()
       
   803 	{
       
   804 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
       
   805 	iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
       
   806 	iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
       
   807 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
       
   808 			iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
       
   809 	CreateKernelSection(KKernelSectionEnd, iAliasShift);
       
   810 	CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
       
   811 	Mmu::DoInit2();
       
   812 	}
       
   813 
       
   814 #ifndef __MMU_MACHINE_CODED__
       
   815 void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
       
   816 //
       
   817 // Map a list of physical RAM pages into a specified page table with specified PTE permissions.
       
   818 // Update the page information array.
       
   819 // Call this with the system locked.
       
   820 //
       
   821 	{
       
   822 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
       
   823 			aId, aType, aPtr, aOffset, aNumPages, aPtePerm));
       
   824 
       
   825 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   826 	ptinfo.iCount+=aNumPages;
       
   827 	aOffset>>=KPageShift;
       
   828 	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
       
   829 	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
       
   830 
       
   831 	TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache.
       
   832 
       
   833 	while(aNumPages--)
       
   834 		{
       
   835 		TPhysAddr pa = *aPageList++;
       
   836 		if(pa==KPhysAddrInvalid)
       
   837 			{
       
   838 			++pPte;
       
   839 			__NK_ASSERT_DEBUG(aType==SPageInfo::EInvalid);
       
   840 			continue;
       
   841 			}
       
   842 		*pPte++ =  pa | aPtePerm;					// insert PTE
       
   843 		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
       
   844 		if (aType!=SPageInfo::EInvalid)
       
   845 			{
       
   846 			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
       
   847 			if(pi)
       
   848 				{
       
   849 				pi->Set(aType,aPtr,aOffset);
       
   850 				__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
       
   851 				++aOffset;	// increment offset for next page
       
   852 				}
       
   853 			}
       
   854 		}
       
   855 	CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
       
   856 	}
       
   857 
       
   858 void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
       
   859 //
       
   860 // Map consecutive physical pages into a specified page table with specified PTE permissions.
       
   861 // Update the page information array if RAM pages are being mapped.
       
   862 // Call this with the system locked.
       
   863 //
       
   864 	{
       
   865 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
       
   866 			aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
       
   867 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   868 	ptinfo.iCount+=aNumPages;
       
   869 	aOffset>>=KPageShift;
       
   870 	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
       
   871 	TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset;		// address of first PTE
       
   872 
       
   873 	TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache
       
   874 
       
   875 	SPageInfo* pi;
       
   876 	if(aType==SPageInfo::EInvalid)
       
   877 		pi = NULL;
       
   878 	else
       
   879 		pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
       
   880 	while(aNumPages--)
       
   881 		{
       
   882 		*pPte++ = aPhysAddr|aPtePerm;						// insert PTE
       
   883 		aPhysAddr+=KPageSize;
       
   884 		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
       
   885 		if (pi)
       
   886 			{
       
   887 			pi->Set(aType,aPtr,aOffset);
       
   888 			__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
       
   889 			++aOffset;	// increment offset for next page
       
   890 			++pi;
       
   891 			}
       
   892 		}
       
   893 
       
   894 	CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
       
   895 	}
       
   896 
       
   897 void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
       
   898 //
       
   899 // Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
       
   900 // virtual address space to a chunk.  No pages are mapped.
       
   901 // Call this with the system locked.
       
   902 //
       
   903 	{
       
   904 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
   905 	ptinfo.iCount+=aNumPages;
       
   906 	}
       
   907 
       
   908 void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* aProcess)
       
   909 //
       
   910 // Replace the mapping at address aAddr in page table aId.
       
   911 // Update the page information array for both the old and new pages.
       
   912 // Return physical address of old page if it is now ready to be freed.
       
   913 // Call this with the system locked.
       
   914 // May be called with interrupts disabled, do not enable/disable them.
       
   915 //
       
   916 	{
       
   917 	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
       
   918 	TPte* pPte=PageTable(aId)+ptOffset;						// address of PTE
       
   919 	TPte pte=*pPte;
       
   920 	TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
       
   921 						 (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
       
   922 	
       
   923 	if (pte & KArmV6PteSmallPage)
       
   924 		{
       
   925 		__ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr, Panic(ERemapPageFailed));
       
   926 		SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
       
   927 		__ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));
       
   928 
       
   929 		// remap page
       
   930 		*pPte = aNewAddr | aPtePerm;					// overwrite PTE
       
   931 		CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
       
   932 		InvalidateTLBForPage(aAddr,asid);	// flush TLB entry
       
   933 		
       
   934 		// update new pageinfo, clear old
       
   935 		SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
       
   936 		pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
       
   937 		oldpi->SetUnused();
       
   938 		}
       
   939 	else
       
   940 		{
       
   941 		Panic(ERemapPageFailed);
       
   942 		}
       
   943 	}
       
   944 
       
   945 void ArmMmu::RemapPageByAsid(TBitMapAllocator* aOsAsids, TLinAddr aLinAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm)
       
   946 //
       
   947 // Replace the mapping at address aLinAddr in the relevant page table for all
       
   948 // ASIDs specified in aOsAsids, but only if the currently mapped address is
       
   949 // aOldAddr.
       
   950 // Update the page information array for both the old and new pages.
       
   951 // Call this with the system unlocked.
       
   952 //
       
   953 	{
       
   954 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageByAsid() linaddr=%08x oldaddr=%08x newaddr=%08x perm=%08x", aLinAddr, aOldAddr, aNewAddr, aPtePerm));
       
   955 
       
   956 	TInt asid = -1;
       
   957 	TInt lastAsid = KArmV6NumAsids - 1;
       
   958 	TUint32* ptr = aOsAsids->iMap;
       
   959 	NKern::LockSystem();
       
   960 	do
       
   961 		{
       
   962 		TUint32 bits = *ptr++;
       
   963 		do
       
   964 			{
       
   965 			++asid;
       
   966 			if(bits & 0x80000000u)
       
   967 				{
       
   968 				// mapped in this address space, so update PTE...
       
   969 				TPte* pPte = PtePtrFromLinAddr(aLinAddr, asid);
       
   970 				TPte pte = *pPte;
       
   971 				if ((pte&~KPageMask) == aOldAddr)
       
   972 					{
       
   973 					*pPte = aNewAddr | aPtePerm;
       
   974 					__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x in asid %d",*pPte,pPte,asid));
       
   975 					CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
       
   976 					InvalidateTLBForPage(aLinAddr,asid);	// flush TLB entry
       
   977 					}
       
   978 				}
       
   979 			}
       
   980 		while(bits<<=1);
       
   981 		NKern::FlashSystem();
       
   982 		asid |= 31;
       
   983 		}
       
   984 	while(asid<lastAsid);
       
   985 
       
   986 	// copy pageinfo attributes and mark old page unused
       
   987 	SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
       
   988 	SPageInfo::FromPhysAddr(aNewAddr)->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
       
   989 	oldpi->SetUnused();
       
   990 
       
   991 	NKern::UnlockSystem();
       
   992 	}
       
   993 
       
   994 TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
       
   995 //
       
   996 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
       
   997 // pages into aPageList, and count of unmapped pages into aNumPtes.
       
   998 // Return number of pages still mapped using this page table.
       
   999 // Call this with the system locked.
       
   1000 // On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedPages instead.
       
  1001 	{
       
  1002 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
       
  1003 	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
       
  1004 	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
       
  1005 	TInt np=0;
       
  1006 	TInt nf=0;
       
  1007 	TUint32 ng=0;
       
  1008 	TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
       
  1009 	                     (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
       
  1010 
       
  1011 	
       
  1012 	while(aNumPages--)
       
  1013 		{
       
  1014 		TPte pte=*pPte;						// get original PTE
       
  1015 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  1016 		remove_and_invalidate_page(pPte, aAddr, asid);
       
  1017 		++pPte;
       
  1018 #else
       
  1019 		*pPte++=0;							// clear PTE
       
  1020 #endif
       
  1021 		
       
  1022 		// We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
       
  1023 		// these to PageUnmapped, as the page doesn't become free until it's unmapped from all
       
  1024 		// processes		
       
  1025 		if (pte != KPteNotPresentEntry)
       
  1026 			++np;
       
  1027 		
       
  1028 		if (pte & KArmV6PteSmallPage)
       
  1029 			{
       
  1030 			ng |= pte;
       
  1031 #if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
   1032 			// On ARM1136 without the erratum 353494 fix, remove_and_invalidate_page above already sorted out cache and TLB;
        
   1033 			// otherwise (this branch) we have to do it here.
       
  1034 			CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
       
  1035 			if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
       
  1036 				InvalidateTLBForPage(aAddr,asid);	// flush any corresponding TLB entry
       
  1037 #endif
       
  1038 			TPhysAddr pa=pte & KPteSmallPageAddrMask;	// physical address of unmapped page
       
  1039 			if (aSetPagesFree)
       
  1040 				{
       
  1041 				SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
       
  1042 				if(iRamCache->PageUnmapped(pi))
       
  1043 					{
       
  1044 					pi->SetUnused();					// mark page as unused
       
  1045 					if (pi->LockCount()==0)
       
  1046 						{
       
  1047 						*aPageList++=pa;			// store in page list
       
  1048 						++nf;						// count free pages
       
  1049 						}
       
  1050 					}
       
  1051 				}
       
  1052 			else
       
  1053 				*aPageList++=pa;				// store in page list
       
  1054 			}
       
  1055 		aAddr+=KPageSize;
       
  1056 		}
       
  1057 
       
  1058 	aNumPtes=np;
       
  1059 	aNumFree=nf;
       
  1060 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
  1061 	TInt r=(ptinfo.iCount-=np);
       
  1062 	if (asid<0)
       
  1063 		r|=KUnmapPagesTLBFlushDeferred;
       
  1064 
       
  1065 	
       
  1066 	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  1067 	__FlushBtb();
       
  1068 	#endif
       
  1069 
       
  1070 	__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
       
  1071 	return r;								// return number of pages remaining in this page table
       
  1072 	}
       
  1073 
       
  1074 TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
       
  1075 //
       
  1076 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
       
  1077 // pages into aPageList, and count of unmapped pages into aNumPtes.
       
  1078 // Adjust the page table reference count as if aNumPages pages were unmapped.
       
  1079 // Return number of pages still mapped using this page table.
       
  1080 // Call this with the system locked.
       
   1081 // On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedVirtual instead.
       
  1082 //
       
  1083 	{
       
  1084 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
  1085 	TInt newCount = ptinfo.iCount - aNumPages;
       
  1086 	UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
       
  1087 	ptinfo.iCount = newCount;
       
  1088 	aNumPtes = aNumPages;
       
  1089 	return newCount;
       
  1090 	}
       
  1091 
       
  1092 TInt ArmMmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages,
       
  1093 		TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
       
  1094 /*
       
  1095  * Unmaps specified area at address aAddr in page table aId.
       
   1096  * Places physical addresses of non-demand-paged unmapped pages into aPageList.
       
  1097  * Corresponding linear addresses are placed into aLAPageList.
       
   1098  * 'Old' demand-paged pages (which hold an invalid PTE that still contains the physical address) are neither unmapped nor
        
   1099  * included in aPageList, but are still counted in aNumPtes.
       
  1100  * 
       
  1101  * This method should be called to decommit physical memory not owned by the chunk. As we do not know
       
   1102  * the origin of such memory, PtInfo could be invalid (or doesn't exist) so cache maintenance may not be
        
   1103  * able to obtain the mapping colour. For that reason, this also returns the former linear address of each page
       
  1104  * in aPageList.   
       
  1105  *   
       
  1106  * @pre All pages are mapped within a single page table identified by aId.
       
  1107  * @pre On entry, the system lock is held and is not released during execution.
       
  1108  *
       
  1109  * @arg aId             Id of the page table that maps the pages.
       
  1110  * @arg aAddr           Linear address of the start of the area.
       
  1111  * @arg aNumPages       The number of pages to unmap.
       
  1112  * @arg aProcess        The owning process of the memory area to unmap.
       
  1113  * @arg aPageList       On exit, holds the list of unmapped pages.
       
  1114  * @arg aLAPageList     On exit, holds the list of linear addresses of unmapped pages.
       
  1115  * @arg aNumFree        On exit, holds the number of pages in aPageList.
       
  1116  * @arg aNumPtes        On exit, holds the number of unmapped pages. This includes demand-paged 'old'
       
  1117  *                      pages (with an invalid page table entry still holding the address of the physical page).
       
  1118  *                      
       
  1119  * @return              The number of pages still mapped using this page table. It is ORed with
       
  1120  *                      KUnmapPagesTLBFlushDeferred if the TLB flush is not executed, which requires
       
  1121  *                      the caller to do a global TLB flush.
       
  1122  */ 
       
  1123     {
       
  1124 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapUnownedPages() id=%d addr=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
       
  1125 	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
       
  1126 	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
       
  1127 	TInt np=0;
       
  1128 	TInt nf=0;
       
  1129 	TUint32 ng=0;
       
  1130 	TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
       
  1131 	                     (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
       
  1132 
       
  1133 	while(aNumPages--)
       
  1134 		{
       
  1135 		TPte pte=*pPte;						// get original PTE
       
  1136 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  1137 		remove_and_invalidate_page(pPte, aAddr, asid);
       
  1138 		++pPte;
       
  1139 #else
       
  1140 		*pPte++=0;							// clear PTE
       
  1141 #endif
       
  1142 		
       
  1143 		// We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
       
  1144 		// these to PageUnmapped, as the page doesn't become free until it's unmapped from all
       
  1145 		// processes		
       
  1146 		if (pte != KPteNotPresentEntry)
       
  1147 			++np;
       
  1148 		
       
  1149 		if (pte & KArmV6PteSmallPage)
       
  1150 			{
       
  1151 			ng |= pte;
       
  1152 #if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  1153 			// In the erratum 353494 path above, remove_and_invalidate_page sorts out the cache and TLB.
       
  1154 			// When __CPU_ARM1136_ERRATUM_353494_FIXED (or not an ARM1136), we have to do it here.
       
  1155 			CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
       
  1156 			if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
       
  1157 				InvalidateTLBForPage(aAddr,asid);	// flush any corresponding TLB entry
       
  1158 #endif
       
  1159 			TPhysAddr pa=pte & KPteSmallPageAddrMask;	// physical address of unmapped page
       
  1160 	        ++nf;
       
  1161 	        *aPageList++=pa;				// store physical address in page list
       
  1162 	        *aLAPageList++=aAddr;			// store linear address in page list
       
  1163 			}
       
  1164 		aAddr+=KPageSize;
       
  1165 		}
       
  1166 
       
  1167 	aNumPtes=np;
       
  1168 	aNumFree=nf;
       
  1169 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
  1170 	TInt r=(ptinfo.iCount-=np);
       
  1171 	if (asid<0)
       
  1172 		r|=KUnmapPagesTLBFlushDeferred;
       
  1173 
       
  1174 	
       
  1175 	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  1176 	__FlushBtb();
       
  1177 	#endif
       
  1178 
       
  1179 	__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
       
  1180 	return r;								// return number of pages remaining in this page table
       
  1181 	}
       
  1182 
       
  1183 
       
  1184 TInt ArmMmu::UnmapUnownedVirtual(TInt aId, TUint32 aAddr, TInt aNumPages,
       
  1185 		TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
       
  1186 //
       
  1187 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
       
  1188 // pages into aPageList, and count of unmapped pages into aNumPtes.
       
  1189 // Adjust the page table reference count as if aNumPages pages were unmapped.
       
  1190 // Return number of pages still mapped using this page table.
       
  1191 // Call this with the system locked.
       
  1192 //
       
  1193 	{
       
  1194 	SPageTableInfo& ptinfo=iPtInfo[aId];
       
  1195 	TInt newCount = ptinfo.iCount - aNumPages;
       
  1196 	UnmapUnownedPages(aId, aAddr, aNumPages, aPageList,  aLAPageList, aNumPtes,  aNumFree,  aProcess);
       
  1197 	ptinfo.iCount = newCount;
       
  1198 	aNumPtes = aNumPages;	
       
  1199 	return newCount;
       
  1200 	}
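       // Illustrative caller sketch (added for clarity; not part of the original source, and
       // compiled out). It shows how the KUnmapPagesTLBFlushDeferred flag returned by the
       // unmap routines above might be handled by a hypothetical caller; 'mmu', 'id', 'addr'
       // and 'n' are assumed names used purely for illustration.
       #if 0
       	TInt numPtes, numFree;
       	TPhysAddr pageList[KChunkSize/KPageSize];
       	TLinAddr laPageList[KChunkSize/KPageSize];
       	NKern::LockSystem();
       	TInt r = mmu.UnmapUnownedVirtual(id, addr, n, pageList, laPageList, numPtes, numFree, NULL);
       	NKern::UnlockSystem();
       	if (r & KUnmapPagesTLBFlushDeferred)
       		FlushTLBs();		// no ASID was available, so the caller must flush the TLBs globally
       #endif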
       
  1201 
       
  1202 void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
       
  1203 //
       
  1204 // Assign an allocated page table to map a given linear address with specified permissions.
       
  1205 // This should be called with the system unlocked and the MMU mutex held.
       
  1206 //
       
  1207 	{
       
  1208 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
       
  1209 	TLinAddr ptLin=PageTableLinAddr(aId);
       
  1210 	TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
       
  1211 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
  1212 	TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
       
  1213 	TInt os_asid=(TInt)aOsAsids;
       
  1214 	if (TUint32(os_asid)<TUint32(iNumOsAsids))
       
  1215 		{
       
  1216 		// single OS ASID
       
  1217 		TPde* pageDir=PageDirectory(os_asid);
       
  1218 		NKern::LockSystem();
       
  1219 		pageDir[pdeIndex]=ptPhys|aPdePerm;	// will blow up here if address is in global region and aOsAsid doesn't have a global PD
       
  1220 		CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1221 		NKern::UnlockSystem();
       
  1222 				
       
  1223 		__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
       
  1224 		}
       
  1225 	else if (os_asid==-1 && gpd)
       
  1226 		{
       
  1227 		// all OS ASIDs, address in global region
       
  1228 		TInt num_os_asids=iNumGlobalPageDirs;
       
  1229 		const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
       
  1230 		for (os_asid=0; num_os_asids; ++os_asid)
       
  1231 			{
       
  1232 			if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
       
  1233 				{
       
  1234 				// this OS ASID exists and has a global page directory
       
  1235 				TPde* pageDir=PageDirectory(os_asid);
       
  1236 				NKern::LockSystem();
       
  1237 				pageDir[pdeIndex]=ptPhys|aPdePerm;
       
  1238 				CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1239 				NKern::UnlockSystem();
       
  1240 
       
  1241 				__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
       
  1242 				--num_os_asids;
       
  1243 				}
       
  1244 			}
       
  1245 		}
       
  1246 	else
       
  1247 		{
       
  1248 		// selection of OS ASIDs or all OS ASIDs
       
  1249 		const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
       
  1250 		if (os_asid==-1)
       
  1251 			pB=iOsAsidAllocator;	// 0's in positions which exist
       
  1252 		TInt num_os_asids=pB->iSize-pB->iAvail;
       
  1253 		for (os_asid=0; num_os_asids; ++os_asid)
       
  1254 			{
       
  1255 			if (pB->NotAllocated(os_asid,1))
       
  1256 				continue;			// os_asid is not needed
       
  1257 			TPde* pageDir=PageDirectory(os_asid);
       
  1258 			NKern::LockSystem();
       
  1259 			pageDir[pdeIndex]=ptPhys|aPdePerm;
       
  1260 			CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1261 			NKern::UnlockSystem();
       
  1262 
       
  1263 			__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
       
  1264 			--num_os_asids;
       
  1265 			}
       
  1266 		}
       
  1267 	}
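       // Note added for clarity (not part of the original source): aOsAsids in
       // DoAssignPageTable() above is an overloaded parameter, as the three branches show:
       //   - a value less than iNumOsAsids : a single OS ASID;
       //   - (const TAny*)-1               : all OS ASIDs (global page directories when the
       //                                     address is in the global region);
       //   - any other value               : a pointer to a TBitMapAllocator in which 0-bits
       //                                     mark the ASIDs whose page directories to update.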
       
  1268 
       
  1269 void ArmMmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
       
  1270 //
       
  1271 // Replace a single page table mapping the specified linear address.
       
  1272 // This should be called with the system locked and the MMU mutex held.
       
  1273 //
       
  1274 	{
       
  1275 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableSingle %08x to %08x at %08x asid %d",aOld,aNew,aAddr,aOsAsid));
       
  1276 	TPde* pageDir=PageDirectory(aOsAsid);
       
  1277 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
  1278 	TPde pde=pageDir[pdeIndex];
       
  1279 	__ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
       
  1280 	TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
       
  1281 	pageDir[pdeIndex]=newPde;	// will blow up here if address is in global region and aOsAsid doesn't have a global PD
       
  1282 	CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1283 				
       
  1284 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
       
  1285 	}
       
  1286 
       
  1287 void ArmMmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
       
  1288 //
       
  1289 // Replace a global page table mapping the specified linear address.
       
  1290 // This should be called with the system locked and the MMU mutex held.
       
  1291 //
       
  1292 	{
       
  1293 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableGlobal %08x to %08x at %08x",aOld,aNew,aAddr));
       
  1294 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
  1295 	TInt num_os_asids=iNumGlobalPageDirs;
       
  1296 	const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
       
  1297 	for (TInt os_asid=0; num_os_asids; ++os_asid)
       
  1298 		{
       
  1299 		if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
       
  1300 			{
       
  1301 			// this OS ASID exists and has a global page directory
       
  1302 			TPde* pageDir=PageDirectory(os_asid);
       
  1303 			TPde pde=pageDir[pdeIndex];
       
  1304 			if ((pde & KPdePageTableAddrMask) == aOld)
       
  1305 				{
       
  1306 				TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
       
  1307 				pageDir[pdeIndex]=newPde;
       
  1308 				CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1309 
       
  1310 				__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
       
  1311 				}
       
  1312 			--num_os_asids;
       
  1313 			}
       
  1314 		if ((os_asid&31)==31)
       
  1315 			NKern::FlashSystem();
       
  1316 		}
       
  1317 	}
       
  1318 
       
  1319 void ArmMmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids)
       
  1320 //
       
  1321 // Replace multiple page table mappings of the specified linear address.
       
  1322 // This should be called with the system locked and the MMU mutex held.
       
  1323 //
       
  1324 	{
       
  1325 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableMultiple %08x to %08x at %08x asids %08x",aOld,aNew,aAddr,aOsAsids));
       
  1326 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
  1327 	const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
       
  1328 	if ((TInt)aOsAsids==-1)
       
  1329 		pB=iOsAsidAllocator;	// 0's in positions which exist
       
  1330 	
       
  1331 	TInt asid = -1;
       
  1332 	TInt lastAsid = KArmV6NumAsids - 1;
       
  1333 	const TUint32* ptr = pB->iMap;
       
  1334 	do
       
  1335 		{
       
  1336 		TUint32 bits = *ptr++;
       
  1337 		do
       
  1338 			{
       
  1339 			++asid;
       
  1340 			if ((bits & 0x80000000u) == 0)
       
  1341 				{
       
  1342 				// mapped in this address space - bitmap is inverted
       
  1343 				TPde* pageDir=PageDirectory(asid);
       
  1344 				TPde pde=pageDir[pdeIndex];
       
  1345 				if ((pde & KPdePageTableAddrMask) == aOld)
       
  1346 					{
       
  1347 					TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
       
  1348 					pageDir[pdeIndex]=newPde;
       
  1349 					CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1350 
       
  1351 					__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
       
  1352 					}
       
  1353 				}
       
  1354 			}
       
  1355 		while(bits<<=1);
       
  1356 		NKern::FlashSystem();
       
  1357 		asid |= 31;
       
  1358 		}
       
  1359 	while(asid<lastAsid);
       
  1360 	}
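       // Note added for clarity (not part of the original source): the loop in
       // RemapPageTableMultiple() above scans the ASID bitmap one 32-bit word at a time,
       // most significant bit first, treating 0-bits as "present in this address space"
       // (the bitmap is inverted). The inner loop stops once no 1-bits remain in the
       // current word, and 'asid |= 31' then advances the counter to the end of that
       // word before the next word is fetched.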
       
  1361 
       
  1362 void ArmMmu::RemapPageTableAliases(TPhysAddr aOld, TPhysAddr aNew)
       
  1363 //
       
  1364 // Replace aliases of the specified page table.
       
  1365 // This should be called with the system locked and the MMU mutex held.
       
  1366 //
       
  1367 	{
       
  1368 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableAliases %08x to %08x",aOld,aNew));
       
  1369 	SDblQue checkedList;
       
  1370 	SDblQueLink* next;
       
  1371 
       
  1372 	while(!iAliasList.IsEmpty())
       
  1373 		{
       
  1374 		next = iAliasList.First()->Deque();
       
  1375 		checkedList.Add(next);
       
  1376 		DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
       
  1377 		TPde pde = thread->iAliasPde;
       
  1378 		if ((pde & ~KPageMask) == aOld)
       
  1379 			{
       
  1380 			// a page table in this page is being aliased by the thread, so update it...
       
  1381 			thread->iAliasPde = (pde & KPageMask) | aNew;
       
  1382 			}
       
  1383 		NKern::FlashSystem();
       
  1384 		}
       
  1385 
       
  1386 	// copy checkedList back to iAliasList
       
  1387 	iAliasList.MoveFrom(&checkedList);
       
  1388 	}
       
  1389 
       
  1390 void ArmMmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids)
       
  1391 //
       
  1392 // Unassign a now-empty page table currently mapping the specified linear address.
       
  1393 // We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
       
  1394 // This should be called with the system unlocked and the MMU mutex held.
       
  1395 //
       
  1396 	{
       
  1397 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids));
       
  1398 	TInt pdeIndex=TInt(aAddr>>KChunkShift);
       
  1399 	TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
       
  1400 	TInt os_asid=(TInt)aOsAsids;
       
  1401 	TUint pde=0;
       
  1402 
       
  1403 	SDblQue checkedList;
       
  1404 	SDblQueLink* next;
       
  1405 
       
  1406 	if (TUint32(os_asid)<TUint32(iNumOsAsids))
       
  1407 		{
       
  1408 		// single OS ASID
       
  1409 		TPde* pageDir=PageDirectory(os_asid);
       
  1410 		NKern::LockSystem();
       
  1411 		pde = pageDir[pdeIndex];
       
  1412 		pageDir[pdeIndex]=0;
       
  1413 		CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1414 		__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
       
  1415 
       
  1416 		// remove any aliases of the page table...
       
  1417 		TUint ptId = pde>>KPageTableShift;
       
  1418 		while(!iAliasList.IsEmpty())
       
  1419 			{
       
  1420 			next = iAliasList.First()->Deque();
       
  1421 			checkedList.Add(next);
       
  1422 			DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
       
  1423 			if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId)
       
  1424 				{
       
  1425 				// the page table is being aliased by the thread, so remove it...
       
  1426 				thread->iAliasPde = 0;
       
  1427 				}
       
  1428 			NKern::FlashSystem();
       
  1429 			}
       
  1430 		}
       
  1431 	else if (os_asid==-1 && gpd)
       
  1432 		{
       
  1433 		// all OS ASIDs, address in global region
       
  1434 		TInt num_os_asids=iNumGlobalPageDirs;
       
  1435 		const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
       
  1436 		for (os_asid=0; num_os_asids; ++os_asid)
       
  1437 			{
       
  1438 			if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
       
  1439 				{
       
  1440 				// this OS ASID exists and has a global page directory
       
  1441 				TPde* pageDir=PageDirectory(os_asid);
       
  1442 				NKern::LockSystem();
       
  1443 				pageDir[pdeIndex]=0;
       
  1444 				CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1445 				NKern::UnlockSystem();
       
  1446 				
       
  1447 				__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
       
  1448 				--num_os_asids;
       
  1449 				}
       
  1450 			}
       
  1451 		// we don't need to look for aliases in this case, because these aren't
       
  1452 		// created for page tables in the global region.
       
  1453 		NKern::LockSystem();
       
  1454 		}
       
  1455 	else
       
  1456 		{
       
  1457 		// selection of OS ASIDs or all OS ASIDs
       
  1458 		const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
       
  1459 		if (os_asid==-1)
       
  1460 			pB=iOsAsidAllocator;	// 0's in positions which exist
       
  1461 		TInt num_os_asids=pB->iSize-pB->iAvail;
       
  1462 		for (os_asid=0; num_os_asids; ++os_asid)
       
  1463 			{
       
  1464 			if (pB->NotAllocated(os_asid,1))
       
  1465 				continue;			// os_asid is not needed
       
  1466 			TPde* pageDir=PageDirectory(os_asid);
       
  1467 			NKern::LockSystem();
       
  1468 			pde = pageDir[pdeIndex];
       
  1469 			pageDir[pdeIndex]=0;
       
  1470 			CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1471 			NKern::UnlockSystem();
       
  1472 			
       
  1473 			__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
       
  1474 			--num_os_asids;
       
  1475 			}
       
  1476 
       
  1477 		// remove any aliases of the page table...
       
  1478 		TUint ptId = pde>>KPageTableShift;
       
  1479 		NKern::LockSystem();
       
  1480 		while(!iAliasList.IsEmpty())
       
  1481 			{
       
  1482 			next = iAliasList.First()->Deque();
       
  1483 			checkedList.Add(next);
       
  1484 			DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
       
  1485 			if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1))
       
  1486 				{
       
  1487 				// the page table is being aliased by the thread, so remove it...
       
  1488 				thread->iAliasPde = 0;
       
  1489 				}
       
  1490 			NKern::FlashSystem();
       
  1491 			}
       
  1492 		}
       
  1493 
       
  1494 	// copy checkedList back to iAliasList
       
  1495 	iAliasList.MoveFrom(&checkedList);
       
  1496 
       
  1497 	NKern::UnlockSystem();
       
  1498 	}
       
  1499 #endif
       
  1500 
       
  1501 // Initialise page table at physical address aXptPhys to be used as page table aXptId
       
  1502 // to expand the virtual address range used for mapping page tables. Map the page table
       
  1503 // at aPhysAddr as page table aId using the expanded range.
       
  1504 // Assign aXptPhys to kernel's Page Directory.
       
  1505 // Called with system unlocked and MMU mutex held.
       
  1506 void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
       
  1507 	{
       
  1508 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
       
  1509 						aXptId, aXptPhys, aId, aPhysAddr));
       
  1510 	
       
  1511 	// put in a temporary mapping for aXptPhys
       
  1512 	// make it noncacheable
       
  1513 	TPhysAddr pa=aXptPhys&~KPageMask;
       
  1514 	*iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
       
  1515 	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
       
  1516 	
       
  1517 	// clear XPT
       
  1518 	TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask));
       
  1519 	memclr(xpt, KPageTableSize);
       
  1520 
       
  1521 	// aXptPhys and aPhysAddr must in fact be in the same physical page
       
  1522 	__ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr));
       
  1523 
       
  1524 	// so only need one mapping
       
  1525 	xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm;
       
  1526 	CacheMaintenance::MultiplePtesUpdated((TLinAddr)xpt, KPageTableSize);
       
  1527 
       
  1528 	// remove temporary mapping
       
  1529 	*iTempPte=0;
       
  1530 	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
       
  1531 	
       
  1532 	InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
       
  1533 
       
  1534 	// initialise PtInfo...
       
  1535 	TLinAddr xptAddr = PageTableLinAddr(aXptId);
       
  1536 	iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
       
  1537 
       
  1538 	// map xpt...
       
  1539 	TInt pdeIndex=TInt(xptAddr>>KChunkShift);
       
  1540 	TPde* pageDir=PageDirectory(0);
       
  1541 	NKern::LockSystem();
       
  1542 	pageDir[pdeIndex]=aXptPhys|KPtPdePerm;
       
  1543 	CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
       
  1544 	
       
  1545 	NKern::UnlockSystem();				
       
  1546 	}
       
  1547 
       
  1548 // Edit the self-mapping entry in page table aId, mapped at aTempMap, to
       
  1549 // change the physical address from aOld to aNew. Used when moving page
       
  1550 // tables which were created by BootstrapPageTable.
       
  1551 // Called with system locked and MMU mutex held.
       
  1552 void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
       
  1553 	{
       
  1554 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x",
       
  1555 						aId, aTempMap, aOld, aNew));
       
  1556 	
       
  1557 	// find correct page table inside the page
       
  1558 	TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift));
       
  1559 	// find the pte in that page table
       
  1560 	xpt += (aId>>KPtClusterShift)&KPagesInPDEMask;
       
  1561 
       
  1562 	// switch the mapping
       
  1563 	__ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed));
       
  1564 	*xpt = aNew | KPtPtePerm;
       
  1565 	// mapped with MapTemp, and thus not mapped as a PTE - have to do real cache clean.
       
  1566 	CacheMaintenance::SinglePteUpdated((TLinAddr)xpt);
       
  1567 	}
       
  1568 
       
  1569 TInt ArmMmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages)
       
  1570 	{
       
  1571 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
       
  1572 	TInt r=0;
       
  1573 	TInt nlocal=iLocalPdSize>>KPageShift;
       
  1574 	aNumPages=aSeparateGlobal ? KPageDirectorySize/KPageSize : nlocal;
       
  1575 	__KTRACE_OPT(KMMU,Kern::Printf("nlocal=%d, aNumPages=%d",nlocal,aNumPages));
       
  1576 	if (aNumPages>1)
       
  1577 		{
       
  1578 		TInt align=aSeparateGlobal ? KPageDirectoryShift : KPageDirectoryShift-1;
       
  1579 		r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, EPageFixed, align);
       
  1580 		}
       
  1581 	else
       
  1582 		r=AllocRamPages(&aPhysAddr,1, EPageFixed);
       
  1583 	__KTRACE_OPT(KMMU,Kern::Printf("r=%d, phys=%08x",r,aPhysAddr));
       
  1584 	if (r!=KErrNone)
       
  1585 		return r;
       
  1586 #ifdef BTRACE_KERNEL_MEMORY
       
  1587 	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, aNumPages<<KPageShift);
       
  1588 	Epoc::KernelMiscPages += aNumPages;
       
  1589 #endif
       
  1590 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
       
  1591 	NKern::LockSystem();
       
  1592 	TInt i;
       
  1593 	for (i=0; i<aNumPages; ++i)
       
  1594 		pi[i].SetPageDir(aOsAsid,i);
       
  1595 	NKern::UnlockSystem();
       
  1596 	return KErrNone;
       
  1597 	}
       
  1598 
       
  1599 inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd)
       
  1600 	{
       
  1601 	memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
       
  1602 	CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
       
  1603 	}
       
  1604 
       
  1605 inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd)
       
  1606 	{
       
  1607 	memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
       
  1608 	CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
       
  1609 	}
       
  1610 
       
  1611 void ArmMmu::InitPageDirectory(TInt aOsAsid, TBool aSeparateGlobal)
       
  1612 	{
       
  1613 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::InitPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
       
  1614 	TPde* newpd=PageDirectory(aOsAsid);	// new page directory
       
  1615 	memclr(newpd, iLocalPdSize);		// clear local page directory
       
  1616 	CacheMaintenance::MultiplePtesUpdated((TLinAddr)newpd, iLocalPdSize);
       
  1617 	if (aSeparateGlobal)
       
  1618 		{
       
  1619 		const TPde* kpd=(const TPde*)KPageDirectoryBase;	// kernel page directory
       
  1620 		if (iLocalPdSize==KPageSize)
       
  1621 			ZeroPdes(newpd, KUserSharedDataEnd1GB, KUserSharedDataEnd2GB);
       
  1622 		ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress);	// don't copy RAM drive
       
  1623 		CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd);		// copy ROM + user global
       
  1624 		CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000);			// copy kernel mappings
       
  1625 		}
       
  1626 	}
       
  1627 
       
  1628 void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex)
       
  1629 	{
       
  1630 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex));
       
  1631 	TPte* pte=PageTable(aId);
       
  1632 	memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte));
       
  1633 	CacheMaintenance::MultiplePtesUpdated((TLinAddr)(pte+aFirstIndex), KPageTableSize-aFirstIndex*sizeof(TPte));
       
  1634 	}
       
  1635 
       
  1636 void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm)
       
  1637 	{
       
  1638 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d",
       
  1639 												aOsAsid, aAddr, aPdePerm, aNumPdes));
       
  1640 	TInt ix=aAddr>>KChunkShift;
       
  1641 	TPde* pPde=PageDirectory(aOsAsid)+ix;
       
  1642 	TLinAddr firstPde = (TLinAddr)pPde; //Will need this to clean page table memory region in cache
       
  1643 
       
  1644 	TPde* pPdeEnd=pPde+aNumPdes;
       
  1645 	NKern::LockSystem();
       
  1646 	for (; pPde<pPdeEnd; ++pPde)
       
  1647 		{
       
  1648 		TPde pde=*pPde;
       
  1649 		if (pde)
       
  1650 			*pPde = (pde&KPdePageTableAddrMask)|aPdePerm;
       
  1651 		}
       
  1652 	CacheMaintenance::MultiplePtesUpdated(firstPde, aNumPdes*sizeof(TPde));
       
  1653 	FlushTLBs();
       
  1654 	NKern::UnlockSystem();
       
  1655 	}
       
  1656 
       
  1657 void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
       
  1658 	{
       
  1659 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
       
  1660 												aId, aPageOffset, aNumPages, aPtePerm));
       
  1661 	TPte* pPte=PageTable(aId)+aPageOffset;
       
  1662 	TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table memory region in cache
       
  1663 
       
  1664 	TPde* pPteEnd=pPte+aNumPages;
       
  1665 	NKern::LockSystem();
       
  1666 	for (; pPte<pPteEnd; ++pPte)
       
  1667 		{
       
  1668 		TPte pte=*pPte;
       
  1669 		if (pte)
       
  1670 			*pPte = (pte&KPteSmallPageAddrMask)|aPtePerm;
       
  1671 		}
       
  1672 	CacheMaintenance::MultiplePtesUpdated(firstPte, aNumPages*sizeof(TPte));
       
  1673 	FlushTLBs();
       
  1674 	NKern::UnlockSystem();
       
  1675 	}
       
  1676 
       
  1677 void ArmMmu::ClearRamDrive(TLinAddr aStart)
       
  1678 	{
       
  1679 	// clear the page directory entries corresponding to the RAM drive
       
  1680 	TPde* kpd=(TPde*)KPageDirectoryBase;	// kernel page directory
       
  1681 	ZeroPdes(kpd, aStart, KRamDriveEndAddress);
       
  1682 	}
       
  1683 
       
  1684 TPde ArmMmu::PdePermissions(TChunkType aChunkType, TBool aRO)
       
  1685 	{
       
  1686 //	if (aChunkType==EUserData && aRO)
       
  1687 //		return KPdePtePresent|KPdePteUser;
       
  1688 	return ChunkPdePermissions[aChunkType];
       
  1689 	}
       
  1690 
       
  1691 TPte ArmMmu::PtePermissions(TChunkType aChunkType)
       
  1692 	{
       
  1693 	return ChunkPtePermissions[aChunkType];
       
  1694 	}
       
  1695 
       
  1696 // Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr
       
  1697 // using ROM at aOrigPhys.
       
  1698 void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
       
  1699 	{
       
  1700 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
       
  1701 		aId, aRomAddr, aOrigPhys));
       
  1702 	TPte* ppte = PageTable(aId);
       
  1703 	TLinAddr firstPte = (TLinAddr)ppte; //Will need this to clean page table memory region in cache
       
  1704 
       
  1705 	TPte* ppte_End = ppte + KChunkSize/KPageSize;
       
  1706 	TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
       
  1707 	for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
       
  1708 		*ppte = phys | KRomPtePerm;
       
  1709 	CacheMaintenance::MultiplePtesUpdated(firstPte, sizeof(TPte)*KChunkSize/KPageSize);
       
  1710 	}
       
  1711 
       
  1712 // Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
       
  1713 // It is assumed aShadowPhys is not mapped, therefore any mapping colour is OK.
       
  1714 void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
       
  1715 	{
       
  1716 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
       
  1717 		aShadowPhys, aRomAddr));
       
  1718 
       
  1719 	// put in a temporary mapping for aShadowPhys
       
  1720 	// make it noncacheable
       
  1721 	*iTempPte = aShadowPhys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
       
  1722 	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
       
  1723 
       
  1724 	// copy contents of ROM
       
  1725 	wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
       
  1726 	//Temp address is uncached. No need to clean cache, just flush write buffer
       
  1727 	CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, KPageSize, EMapAttrBufferedC);
       
  1728 	
       
  1729 	// remove temporary mapping
       
  1730 	*iTempPte=0;
       
  1731 	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
       
  1732 	InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
       
  1733 	}
       
  1734 
       
  1735 // Assign a shadow page table to replace a ROM section mapping
       
  1736 // Enter and return with system locked
       
  1737 void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
       
  1738 	{
       
  1739 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
       
  1740 		aId, aRomAddr));
       
  1741 	TLinAddr ptLin=PageTableLinAddr(aId);
       
  1742 	TPhysAddr ptPhys=LinearToPhysical(ptLin, 0);
       
  1743 	TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
       
  1744 	TPde newpde = ptPhys | KShadowPdePerm;
       
  1745 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
       
  1746 	TInt irq=NKern::DisableAllInterrupts();
       
  1747 	*ppde = newpde;		// map in the page table
       
  1748 	CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
       
  1749 	
       
  1750 	FlushTLBs();	// flush both TLBs (no need to flush cache yet)
       
  1751 	NKern::RestoreInterrupts(irq);
       
  1752 	}
       
  1753 
       
  1754 void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
       
  1755 	{
       
  1756 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
       
  1757 	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
       
  1758 	TPte newpte = aOrigPhys | KRomPtePerm;
       
  1759 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
       
  1760 	TInt irq=NKern::DisableAllInterrupts();
       
  1761 	*ppte = newpte;
       
  1762 	CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
       
  1763 	
       
  1764 	InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
       
  1765 	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  1766 	__FlushBtb();
       
  1767 	#endif
       
  1768 
       
  1769 	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
       
  1770 	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
       
  1771 	NKern::RestoreInterrupts(irq);
       
  1772 	}
       
  1773 
       
  1774 TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)
       
  1775 	{
       
  1776 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys));
       
  1777 	TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
       
  1778 	TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions;
       
  1779 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
       
  1780 	TInt irq=NKern::DisableAllInterrupts();
       
  1781 	*ppde = newpde;			// revert to section mapping
       
  1782 	CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
       
  1783 	
       
  1784 	FlushTLBs();			// flush both TLBs
       
  1785 	NKern::RestoreInterrupts(irq);
       
  1786 	return KErrNone;
       
  1787 	}
       
  1788 
       
  1789 
       
  1790 #if defined(__CPU_MEMORY_TYPE_REMAPPING)	// arm1176, arm11mcore, armv7, ...
       
  1791 /**
       
  1792 Shadow pages on platforms with remapping (mpcore, 1176, cortex...) are not writable.
       
  1793 This will map the region into writable memory first.
       
  1794 @pre No Fast Mutex held
       
  1795 */
       
  1796 TInt ArmMmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
       
  1797 	{
       
  1798 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength));
       
  1799 
       
  1800 	// Check that destination is ROM
       
  1801 	if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd)
       
  1802 		{
       
  1803 		__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: Destination not entirely in ROM"));
       
  1804 		return KErrArgument;
       
  1805 		}
       
  1806 	// do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us)
       
  1807 	MmuBase::Wait();
       
  1808 
       
  1809 
       
  1810 	TInt r = KErrNone;
       
  1811 	while (aLength)
       
  1812 		{
       
  1813 		// Calculate memory size to copy in this loop. A single page region will be copied per loop
       
  1814 		TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask));
       
  1815 
       
  1816 		// Get physical address
       
  1817 		TPhysAddr	physAddr = LinearToPhysical(aDest&~iPageMask, 0);
       
  1818 		if (KPhysAddrInvalid==physAddr)
       
  1819 			{
       
  1820 			r = KErrArgument;
       
  1821 			break;
       
  1822 			}
       
  1823 		
       
  1824 		//check whether it is shadowed rom
       
  1825 		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
       
  1826 		if (pi==0 || pi->Type()!=SPageInfo::EShadow)
       
  1827 			{
       
  1828 			__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: No shadow page at this address"));
       
  1829 			r = KErrArgument;
       
  1830 			break;
       
  1831 			}
       
  1832 
       
  1833 		//Temporarily map into writable memory and copy data. RamAllocator DMutex is required
       
  1834 		TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask);
       
  1835 		__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize));
       
  1836 		memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize);  //Kernel-to-Kernel copy is presumed
       
  1837 		UnmapTemp();
       
  1838 
       
  1839 		//Update variables for the next loop/page
       
  1840 		aDest+=copySize;
       
  1841 		aSrc+=copySize;
       
  1842 		aLength-=copySize;
       
  1843 		}
       
  1844 	MmuBase::Signal();
       
  1845 	return r;
       
  1846 	}
       
  1847 #endif
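       // Illustrative usage sketch (added for clarity; not part of the original source, and
       // compiled out). A hypothetical caller might patch a word of already-shadowed ROM via
       // CopyToShadowMemory(); 'mmu', 'romAddr' and the payload value are assumptions used
       // purely for illustration.
       #if 0
       	TUint32 payload = 0xE1A00000;		// example word to write into the shadowed ROM page
       	TInt r = mmu.CopyToShadowMemory(romAddr, (TLinAddr)&payload, sizeof(payload));
       	// r is KErrArgument if romAddr is not in ROM or the page has not been shadowed
       #endif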
       
  1848 
       
  1849 void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
       
  1850 	{
       
  1851 #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 and later
       
  1852 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage not required with MEMORY_TYPE_REMAPPING"));
       
  1853 #else
       
  1854 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
       
  1855 		aId, aRomAddr));
       
  1856 	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
       
  1857 	TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePerm;
       
  1858 	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
       
  1859 	*ppte = newpte;
       
  1860 	CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
       
  1861 	InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
       
  1862 #endif	
       
  1863 	}
       
  1864 
       
  1865 /** Replaces a large page (64K) entry in the page table with small page (4K) entries. */
       
  1866 void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr)
       
  1867 	{
       
  1868 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr));
       
  1869 	
       
  1870 	TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
       
  1871 	TPte* pte = PageTable(aId);
       
  1872 	if ((pte[pteIndex] & KArmV6PteTypeMask) == KArmV6PteLargePage)
       
  1873 		{
       
  1874 		__KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages"));
       
  1875 		pteIndex &= ~0xf;
       
  1876 		TPte source = pte[pteIndex];
       
  1877 		source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source);
       
  1878 		pte += pteIndex;
       
  1879 		for (TInt entry=0; entry<16; entry++)
       
  1880 			{
       
  1881 			pte[entry] = source | (entry<<12);
       
  1882 			}
       
  1883 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)pte, 16*sizeof(TPte));
       
  1884 		FlushTLBs();
       
  1885 		}
       
  1886 	}
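       // Worked example added for clarity (not part of the original source): if the 64K entry
       // being split by Pagify() above maps physical base B, the loop writes sixteen small-page
       // entries pte[0..15], where pte[n] maps B + n*4K with the attributes produced by
       // SP_PTE_FROM_LP_PTE(), i.e. pte[n] = (B | smallPageAttributes) | (n << 12).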
       
  1887 
       
  1888 void ArmMmu::FlushShadow(TLinAddr aRomAddr)
       
  1889 	{
       
  1890 	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
       
  1891 	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
       
  1892 	InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);		// remove all TLB references to original ROM page
       
  1893 	}
       
  1894 
       
  1895 
       
  1896 #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7
       
  1897 /**
       
  1898 Calculates page directory/table entries for memory type described in aMapAttr.
       
  1899 Global, small page (4KB) mapping is assumed.
       
  1900 (All magic numbers come from ARM page table descriptions.)
       
  1901 @param aMapAttr On entry, holds the description (memory type, access permissions, ...) of the memory.
       
  1902 				It is made up of TMappingAttributes constants or a TMappingAttributes2 object. If TMappingAttributes, it
       
  1903 				may be altered on exit to hold the actual cache attributes & access permissions.
       
  1904 @param aPde		On exit, holds the 1st level descriptor (page directory entry)
       
  1905 				for the given type of memory, with the base address set to 0.
       
  1906 @param aPte		On exit, holds the small-page (4K) 2nd level descriptor (page table entry)
       
  1907 				for the given type of memory, with the base address set to 0.
       
  1908 @return KErrNotSupported 	If memory described in aMapAttr is not supported
       
  1909 		KErrNone			Otherwise
       
  1910 */
       
  1911 TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
       
  1912 	{
       
  1913 	__KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
       
  1914 
       
  1915 	TMappingAttributes2& memory = (TMappingAttributes2&)aMapAttr;
       
  1916 
       
  1917 	if(memory.ObjectType2())
       
  1918 		{
       
  1919 //---------Memory described by TMappingAttributes2 object-----------------
       
  1920 		aPde = 	KArmV6PdePageTable	|
       
  1921 				(memory.Parity() ? KArmV6PdeECCEnable : 0);
       
  1922 #if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
       
  1923 		if(!memory.Shared() && (memory.Type() == EMemAttDevice ))
       
  1924 		{
       
  1925 			aMapAttr ^= EMapAttrBufferedNC;
       
  1926 			aMapAttr |= EMapAttrFullyBlocking;
       
  1927 			// Clear EMemAttDevice
       
  1928 			aMapAttr ^= (EMemAttDevice << 26);
       
  1929 			aMapAttr |= (EMemAttStronglyOrdered << 26);
       
  1930 		}
       
  1931 #endif
       
  1932 		aPte =	KArmV6PteSmallPage										|
       
  1933 				KArmV6PteAP0											|	// AP0 bit always 1
       
  1934 				((memory.Type()&3)<<2) | ((memory.Type()&4)<<4)			|	// memory type
       
  1935 				(memory.Executable() ? 0			: KArmV6PteSmallXN)	|	// eXecuteNever bit
       
  1936 #if defined	(__CPU_USE_SHARED_MEMORY)
       
  1937 				KArmV6PteS 												|	// Memory is always shared.
       
  1938 #else
       
  1939 				(memory.Shared()	  ? KArmV6PteS	: 0) 				|	// Shared bit
       
  1940 #endif				
       
  1941 				(memory.Writable()	  ? 0			: KArmV6PteAPX)		|	// APX = !Writable
       
  1942 				(memory.UserAccess() ? KArmV6PteAP1: 0);					// AP1 = UserAccess
       
  1943 		// aMapAttr remains the same
       
  1944 		}
       
  1945 	else
       
  1946 		{
       
  1947 //---------Memory described by TMappingAttributes bitmask-----------------
       
  1948 #if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
       
  1949 		if(((aMapAttr & EMapAttrL1CacheMask) == EMapAttrBufferedNC) && !(aMapAttr & EMapAttrShared))
       
  1950 		{
       
  1951 			// Clear EMapAttrBufferedNC attribute
       
  1952 			aMapAttr ^= EMapAttrBufferedNC;
       
  1953 			aMapAttr |= EMapAttrFullyBlocking;
       
  1954 		}
       
  1955 #endif
       
  1956 		//	1.	Calculate TEX0:C:B bits in page table and actual cache attributes.
       
  1957 		//		Only L1 cache attribute from aMapAttr matters. Outer (L2) cache policy will be the same as inner one.
       
  1958 		TUint l1cache=aMapAttr & EMapAttrL1CacheMask; // Inner cache attributes. May change to actual value.
       
  1959 		TUint l2cache;	// Will hold actual L2 cache attributes (in terms of TMappingAttributes constants)
       
  1960 		TUint tex0_c_b; // Will hold TEX[0]:C:B value in page table
       
  1961 
       
  1962 		switch (l1cache)
       
  1963 			{
       
  1964 			case EMapAttrFullyBlocking:
       
  1965 				tex0_c_b = EMemAttStronglyOrdered;
       
  1966 				l2cache = EMapAttrL2Uncached;
       
  1967 				break;
       
  1968 			case EMapAttrBufferedNC:
       
  1969 				tex0_c_b = EMemAttDevice;
       
  1970 				l2cache = EMapAttrL2Uncached;
       
  1971 				break;
       
  1972 			case EMapAttrBufferedC:
       
  1973 			case EMapAttrL1Uncached:
       
  1974 			case EMapAttrCachedWTRA:
       
  1975 			case EMapAttrCachedWTWA:
       
  1976 				tex0_c_b = EMemAttNormalUncached;
       
  1977 				l1cache = EMapAttrBufferedC;
       
  1978 				l2cache = EMapAttrL2Uncached;
       
  1979 				break;
       
  1980 			case EMapAttrCachedWBRA:
       
  1981 			case EMapAttrCachedWBWA:
       
  1982 			case EMapAttrL1CachedMax:
       
  1983 				tex0_c_b = EMemAttNormalCached;
       
  1984 				l1cache = EMapAttrCachedWBWA;
       
  1985 				l2cache = EMapAttrL2CachedWBWA;
       
  1986 				break;
       
  1987 			default:
       
  1988 				return KErrNotSupported;
       
  1989 			}
       
  1990 
       
  1991 		//	2.	Step 2 has been removed :)
       
  1992 
       
  1993 		//	3.	Calculate access permissions (apx:ap bits in page table + eXecute it)
       
  1994 		TUint read=aMapAttr & EMapAttrReadMask;
       
  1995 		TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
       
  1996 		TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
       
  1997 
       
  1998 		read|=exec; 		// User/Sup execute access requires User/Sup read access.
       
  1999 		if (exec) exec = 1; // There is a single eXecute bit in page table. Set to one if User or Sup exec is required.
       
  2000 
       
  2001 		TUint apxap=0;
       
  2002 		if (write==0) 		// no write required
       
  2003 			{
       
  2004 			if 		(read>=4)	apxap=KArmV6PermRORO;		// user read required
       
  2005 			else if (read==1) 	apxap=KArmV6PermRONO;		// supervisor read required
       
  2006 			else 				return KErrNotSupported;	// no read required
       
  2007 			}
       
  2008 		else if (write<4)	// supervisor write required
       
  2009 			{
       
  2010 			if (read<4) 		apxap=KArmV6PermRWNO;		// user read not required
       
  2011 			else 				return KErrNotSupported;	// user read required 
       
  2012 			}
       
  2013 		else				// user & supervisor writes required
       
  2014 			{
       
  2015 			apxap=KArmV6PermRWRW;		
       
  2016 			}
       
  2017 	
       
  2018 		//	4.	Calculate page-table-entry for the 1st level (aka page directory) descriptor 
       
  2019 		aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
       
  2020 
       
  2021 		//	5.	Calculate small-page-entry for the 2nd level (aka page table) descriptor 
       
  2022 		aPte=SP_PTE(apxap, tex0_c_b, exec, 1);	// always global
       
  2023 		if (aMapAttr&EMapAttrShared)
       
  2024 			aPte |= KArmV6PteS;
       
  2025 	
       
  2026 		//	6.	Fix aMapAttr to hold the actual values for access permission & cache attributes
       
  2027 		TUint xnapxap=((aPte<<3)&8)|((aPte>>7)&4)|((aPte>>4)&3);
       
  2028 		aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
       
  2029 		aMapAttr |= PermissionLookup[xnapxap]; 	// Set actual access permissions
       
  2030 		aMapAttr |= l1cache;					// Set actual inner cache attributes
       
  2031 		aMapAttr |= l2cache;					// Set actual outer cache attributes
       
  2032 		}
       
  2033 
       
  2034 	__KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x", 	aMapAttr, aPde, aPte));
       
  2035 	return KErrNone;
       
  2036 	}
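       // Worked example added for clarity (not part of the original source), following the
       // TMappingAttributes-bitmask branch above. For a request of user read+write, no execute,
       // L1 cache policy EMapAttrCachedWBWA and not shared:
       //   step 1: tex0_c_b = EMemAttNormalCached; actual l1cache = EMapAttrCachedWBWA,
       //           actual l2cache = EMapAttrL2CachedWBWA
       //   step 3: user write requested            => apxap = KArmV6PermRWRW
       //   step 4: aPde = KArmV6PdePageTable        (no ECC requested)
       //   step 5: aPte = SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 1)
       //   step 6: aMapAttr is rewritten to report the permissions and cache policies
       //           actually granted.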
       
  2037 
       
  2038 #else //ARMv6 (arm1136)
       
  2039 
       
  2040 const TUint FBLK=(EMapAttrFullyBlocking>>12);
       
  2041 const TUint BFNC=(EMapAttrBufferedNC>>12);
       
  2042 //const TUint BUFC=(EMapAttrBufferedC>>12);
       
  2043 const TUint L1UN=(EMapAttrL1Uncached>>12);
       
  2044 const TUint WTRA=(EMapAttrCachedWTRA>>12);
       
  2045 //const TUint WTWA=(EMapAttrCachedWTWA>>12);
       
  2046 const TUint WBRA=(EMapAttrCachedWBRA>>12);
       
  2047 const TUint WBWA=(EMapAttrCachedWBWA>>12);
       
  2048 const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
       
  2049 //const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
       
  2050 //const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
       
  2051 const TUint AWBW=(EMapAttrAltCacheWBWA>>12);
       
  2052 const TUint MAXC=(EMapAttrL1CachedMax>>12);
       
  2053 
       
  2054 const TUint L2UN=(EMapAttrL2Uncached>>16);
       
  2055 
       
  2056 const TUint8 UNS=0xffu;	// Unsupported attribute
       
  2057 
       
  2058 //Maps L1 & L2 cache attributes into TEX[4:2]:CB[1:0]
       
  2059 //ARMv6 doesn't do WTWA so we use WTRA instead
       
  2060 
       
  2061 #if !defined(__CPU_ARM1136_ERRATUM_399234_FIXED)
       
  2062 // L1 Write-Through mode is outlawed, L1WT acts as L1UN.
       
  2063 static const TUint8 CBTEX[40]=
       
  2064 	{            // L1CACHE:
       
  2065 //  FBLK  BFNC  BUFC  L1UN  WTRA  WTWA  WBRA  WBWA 	  L2CACHE:
       
  2066 	0x00, 0x01, 0x01, 0x04, 0x04, 0x04, 0x13, 0x11,	//NC
       
  2067 	0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19,	//WTRA
       
  2068 	0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19,	//WTWA
       
  2069 	0x00, 0x01, 0x01, 0x1c, 0x1c, 0x1c, 0x1f, 0x1d,	//WBRA
       
  2070 	0x00, 0x01, 0x01, 0x14, 0x14, 0x14, 0x17, 0x15	//WBWA
       
  2071 	};
       
  2072 #else
       
  2073 static const TUint8 CBTEX[40]=
       
  2074 	{            // L1CACHE:
       
  2075 //  FBLK  BFNC  BUFC  L1UN  WTRA  WTWA  WBRA  WBWA 	  L2CACHE:
       
  2076 	0x00, 0x01, 0x01, 0x04, 0x12, 0x12, 0x13, 0x11,	//NC
       
  2077 	0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19,	//WTRA
       
  2078 	0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19,	//WTWA
       
  2079 	0x00, 0x01, 0x01, 0x1c, 0x1e, 0x1e, 0x1f, 0x1d,	//WBRA
       
  2080 	0x00, 0x01, 0x01, 0x14, 0x16, 0x16, 0x17, 0x15	//WBWA
       
  2081 	};
       
  2082 #endif
       
  2083 
       
  2084 //Maps TEX[4:2]:CB[1:0] value into L1 cache attributes
       
  2085 static const TUint8 L1Actual[32]=
       
  2086 	{
       
  2087 //CB 00		 01		 10		 11		//TEX
       
  2088 	FBLK,	BFNC,	WTRA,	WBRA,	//000
       
  2089 	L1UN,  	UNS,  	UNS, 	WBWA,	//001
       
  2090 	BFNC,	UNS,	UNS,  	UNS,	//010
       
  2091 	UNS,	UNS,	UNS,	UNS,	//011
       
  2092 	L1UN, 	WBWA, 	WTRA, 	WBRA,	//100
       
  2093 	L1UN, 	WBWA, 	WTRA, 	WBRA,	//101
       
  2094 	L1UN, 	WBWA, 	WTRA, 	WBRA,	//110
       
  2095 	L1UN, 	WBWA, 	WTRA, 	WBRA	//111
       
  2096 	};
       
  2097 
       
  2098 //Maps TEX[4:2]:CB[1:0] value into L2 cache attributes
       
  2099 static const TUint8 L2Actual[32]=
       
  2100 	{
       
  2101 //CB 00		 01		 10		 11		//TEX
       
  2102 	L2UN,	L2UN,	WTRA,	WBRA,	//000
       
  2103 	L2UN,	UNS,	UNS,	WBWA,	//001
       
  2104 	L2UN,	UNS,	UNS,	UNS,	//010
       
  2105 	UNS,	UNS,	UNS,	UNS,	//011
       
  2106 	L2UN,	L2UN,	L2UN,	L2UN,	//100
       
  2107 	WBWA,	WBWA,	WBWA,	WBWA,	//101
       
  2108 	WTRA,	WTRA,	WTRA,	WTRA,	//110
       
  2109 	WBRA,	WBRA,	WBRA,	WBRA	//111
       
  2110 	};
       
  2111 
       
  2112 TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
       
  2113 	{
       
  2114 	__KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
       
  2115 
       
  2116 	TUint read=aMapAttr & EMapAttrReadMask;
       
  2117 	TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
       
  2118 	TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
       
  2119 	TUint l1cache=(aMapAttr & EMapAttrL1CacheMask)>>12;
       
  2120 	TUint l2cache=(aMapAttr & EMapAttrL2CacheMask)>>16;
       
  2121 	if (l1cache==MAXC) l1cache=WBRA;	// map max cache to WBRA
       
  2122 	if (l1cache>AWBW)
       
  2123 		return KErrNotSupported;		// undefined attribute
       
  2124 	if (l1cache>=AWTR) l1cache-=4;		// no alternate cache, so use normal cache
       
  2125 	if (l1cache<L1UN) l2cache=0;		// for blocking/device, don't cache L2
       
  2126 	if (l2cache==MAXC) l2cache=WBRA;	// map max cache to WBRA
       
  2127 	if (l2cache>WBWA)
       
  2128 		return KErrNotSupported;		// undefined attribute
       
  2129 	if (l2cache) l2cache-=(WTRA-1);		// l2cache now in range 0-4
       
  2130 	aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
       
  2131 
       
  2132 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  2133 	// if broken 1136, can't have supervisor only code
       
  2134 	if (exec)
       
  2135 		exec = TUint(EMapAttrExecUser>>8);
       
  2136 #endif
       
  2137 
       
  2138 	// if any execute access, must have read=execute
       
  2139 	if (exec)
       
  2140 	(void)(read>=exec || (read=exec)!=0), exec=1;	// ensure read covers exec, then reduce exec to a single bit
       
  2141 
       
  2142 	// l1cache between 0 and 7, l2cache between 0 and 4; look up CBTEX
       
  2143 	TUint cbtex=CBTEX[(l2cache<<3)|l1cache];
       
  2144 
       
  2145 	// work out apx:ap
       
  2146 	TUint apxap;
       
  2147 	if (write==0)
       
  2148 		apxap=(read>=4)?KArmV6PermRORO:(read?KArmV6PermRONO:KArmV6PermNONO);
       
  2149 	else if (write<4)
       
  2150 		apxap=(read>=4)?KArmV6PermRWRO:KArmV6PermRWNO;
       
  2151 	else
       
  2152 		apxap=KArmV6PermRWRW;
       
  2153 	TPte pte=SP_PTE(apxap, cbtex, exec, 1);	// always global
       
  2154 	if (aMapAttr&EMapAttrShared)
       
  2155 		pte |= KArmV6PteS;
       
  2156 
       
  2157 	// Translate back to get actual map attributes
       
  2158 	TUint xnapxap=((pte<<3)&8)|((pte>>7)&4)|((pte>>4)&3);
       
  2159 	cbtex=((pte>>4)&0x1c)|((pte>>2)&3);  // = TEX[4:2]::CB[1:0]
       
  2160 	aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
       
  2161 	aMapAttr |= PermissionLookup[xnapxap];
       
  2162 	aMapAttr |= (L1Actual[cbtex]<<12);
       
  2163 	aMapAttr |= (L2Actual[cbtex]<<16);
       
  2164 	aPte=pte;
       
  2165 	__KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x",
       
  2166 								aMapAttr, aPde, aPte));
       
  2167 	return KErrNone;
       
  2168 	}
       
  2169 #endif
       
  2170 
       
  2171 void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
       
  2172 //
       
  2173 // Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
       
  2174 // Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
       
  2175 // Assume any page tables required are already assigned.
       
  2176 // aLinAddr, aPhysAddr, aSize must be page-aligned.
       
  2177 //
       
  2178 	{
       
  2179 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
       
  2180 	__KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
       
  2181 	TPde pt_pde=aPdePerm;
       
  2182 	TPte sp_pte=aPtePerm;
       
  2183 	TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte);
       
  2184 	TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte);
       
  2185 	TLinAddr la=aLinAddr;
       
  2186 	TPhysAddr pa=aPhysAddr;
       
  2187 	TInt remain=aSize;
       
  2188 	while (remain)
       
  2189 		{
       
  2190 		if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
       
  2191 			{
       
  2192 			// use sections - ASSUMES ADDRESS IS IN GLOBAL REGION
       
  2193 			TInt npdes=remain>>KChunkShift;
       
  2194 			const TBitMapAllocator& b=*iOsAsidAllocator;
       
  2195 			TInt num_os_asids=iNumGlobalPageDirs;
       
  2196 			TInt os_asid=0;
       
  2197 			for (; num_os_asids; ++os_asid)
       
  2198 				{
       
  2199 				if (b.NotAllocated(os_asid,1) || (iAsidInfo[os_asid]&1)==0)
       
  2200 					continue;			// os_asid is not needed
       
  2201 				TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift);
       
  2202 				TPde* p_pde_E=p_pde+npdes;
       
  2203 				TPde pde=pa|section_pde;
       
  2204 				TLinAddr firstPde = (TLinAddr)p_pde; // Will need this to clean the page table memory region from the cache
       
  2205 
       
  2206 				NKern::LockSystem();
       
  2207 				for (; p_pde < p_pde_E; pde+=KChunkSize)
       
  2208 					{
       
  2209 					__ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
       
  2210 					__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
       
  2211 					*p_pde++=pde;
       
  2212 					}
       
  2213 				CacheMaintenance::MultiplePtesUpdated(firstPde, (TUint)p_pde-firstPde);
       
  2214 				NKern::UnlockSystem();
       
  2215 				--num_os_asids;
       
  2216 				}
       
  2217 			npdes<<=KChunkShift;
       
  2218 			la+=npdes, pa+=npdes, remain-=npdes;
       
  2219 			continue;
       
  2220 			}
       
  2221 		TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
       
  2222 		TPte pa_mask=~KPageMask;
       
  2223 		TPte pte_perm=sp_pte;
       
  2224 		if (aMapShift>=KLargePageShift && block_size>=KLargePageSize)
       
  2225 			{
       
  2226 			if ((la & KLargePageMask)==0)
       
  2227 				{
       
  2228 				// use 64K large pages
       
  2229 				pa_mask=~KLargePageMask;
       
  2230 				pte_perm=lp_pte;
       
  2231 				}
       
  2232 			else
       
  2233 				block_size = Min(remain, KLargePageSize-(la&KLargePageMask));
       
  2234 			}
       
  2235 		block_size &= pa_mask;
       
  2236 
       
  2237 		// use pages (large or small)
       
  2238 		TInt id=PageTableId(la, 0);
       
  2239 		__ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
       
  2240 		TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift);
       
  2241 		TPte* p_pte_E=p_pte + (block_size>>KPageShift);
       
  2242 		SPageTableInfo& ptinfo=iPtInfo[id];
       
  2243 		TLinAddr firstPte = (TLinAddr)p_pte; // Will need this to clean the page table memory region from the cache
       
  2244 		
       
  2245 		NKern::LockSystem();
       
  2246 		for (; p_pte < p_pte_E; pa+=KPageSize)
       
  2247 			{
       
  2248 			__ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
       
  2249 			TPte pte = (pa & pa_mask) | pte_perm;
       
  2250 			__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
       
  2251 			*p_pte++=pte;
       
  2252 			++ptinfo.iCount;
       
  2253 			NKern::FlashSystem();
       
  2254 			}
       
  2255 		CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)p_pte-firstPte);
       
  2256 		NKern::UnlockSystem();
       
  2257 		la+=block_size, remain-=block_size;
       
  2258 		}
       
  2259 	}
       
  2260 
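       // A minimal sketch of the mapping-size choice made by ArmMmu::Map() above:
       // a 1MB section when the address is chunk-aligned and at least a chunk
       // remains (and aMapShift permits), a 64KB large page when large-page
       // aligned, otherwise 4KB small pages. The helper name and KEx* constants
       // are illustrative only; the real code also clips runs at chunk boundaries
       // before choosing large pages.
       #if 0	// illustrative sketch, not compiled
       static TInt ExampleMappingSize(TLinAddr aLin, TInt aRemain, TInt aMapShift)
       	{
       	const TInt KExPageSize=0x1000, KExLargePageSize=0x10000, KExChunkSize=0x100000;
       	if (aMapShift>=20 && (aLin&(KExChunkSize-1))==0 && aRemain>=KExChunkSize)
       		return KExChunkSize;		// 1MB section
       	if (aMapShift>=16 && (aLin&(KExLargePageSize-1))==0 && aRemain>=KExLargePageSize)
       		return KExLargePageSize;	// 64KB large page
       	return KExPageSize;				// 4KB small page
       	}
       #endif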
       
  2261 void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize)
       
  2262 //
       
  2263 // Remove all mappings in the specified range of addresses.
       
  2264 // Assumes there are only global mappings involved.
       
  2265 // Don't free page tables.
       
  2266 // aLinAddr, aSize must be page-aligned.
       
  2267 //
       
  2268 	{
       
  2269 	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
       
  2270 	TLinAddr a=aLinAddr;
       
  2271 	TLinAddr end=a+aSize;
       
  2272 	__KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
       
  2273 	NKern::LockSystem();
       
  2274 	while(a!=end)
       
  2275 		{
       
  2276 		TInt pdeIndex=a>>KChunkShift;
       
  2277 		TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
       
  2278 		TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift;
       
  2279 		__KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
       
  2280 		TPde pde=::InitPageDirectory[pdeIndex];
       
  2281 		if ( (pde&KArmV6PdeTypeMask)==KArmV6PdeSection )
       
  2282 			{
       
  2283 			__ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
       
  2284 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  2285 			remove_and_invalidate_section(::InitPageDirectory + pdeIndex, a, KERNEL_MAPPING);
       
  2286 #else
       
  2287 			::InitPageDirectory[pdeIndex]=0;
       
  2288 			CacheMaintenance::SinglePteUpdated(TLinAddr(::InitPageDirectory + pdeIndex));
       
  2289 			InvalidateTLBForPage(a, KERNEL_MAPPING);		// ASID irrelevant since global
       
  2290 #endif
       
  2291 			a=next;
       
  2292 			NKern::FlashSystem();
       
  2293 			continue;
       
  2294 			}
       
  2295 		TInt ptid=PageTableId(a,0);
       
  2296 		if (ptid>=0)
       
  2297 			{
       
  2298 			SPageTableInfo& ptinfo=iPtInfo[ptid];	// only index iPtInfo once ptid is known to be valid
       
  2299 			TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
       
  2300 			TPte* ppte_End=ppte+to_do;
       
  2301 			for (; ppte<ppte_End; ++ppte, a+=KPageSize)
       
  2302 				{
       
  2303 				if (*ppte & KArmV6PteSmallPage)
       
  2304 					{
       
  2305 					--ptinfo.iCount;
       
  2306 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  2307 					remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
       
  2308 #else
       
  2309 					*ppte=0;
       
  2310 					CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
       
  2311 					InvalidateTLBForPage(a, KERNEL_MAPPING);
       
  2312 #endif
       
  2313 					}
       
  2314 				else if ((*ppte & KArmV6PteTypeMask) == KArmV6PteLargePage)
       
  2315 					{
       
  2316 					__ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment));
       
  2317 					ptinfo.iCount-=KLargeSmallPageRatio;
       
  2318 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  2319 					remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
       
  2320 #else
       
  2321 					memclr(ppte, KLargeSmallPageRatio*sizeof(TPte));
       
  2322 					CacheMaintenance::MultiplePtesUpdated((TLinAddr)ppte, KLargeSmallPageRatio*sizeof(TPte));
       
  2323 					InvalidateTLBForPage(a, KERNEL_MAPPING);
       
  2324 #endif
       
  2325 					a+=(KLargePageSize-KPageSize);
       
  2326 					ppte+=(KLargeSmallPageRatio-1);
       
  2327 					}
       
  2328 				NKern::FlashSystem();
       
  2329 				}
       
  2330 			}
       
  2331 		else
       
  2332 			a += (to_do<<KPageShift);
       
  2333 		}
       
  2334 	NKern::UnlockSystem();
       
  2335 	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
       
  2336 	__FlushBtb();
       
  2337 	#endif
       
  2338 	}
       
  2339 
       
  2340 
       
  2341 void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
       
  2342 	{
       
  2343 	//map the pages at a temporary address, clear them and unmap
       
  2344 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  2345 	while (--aNumPages >= 0)
       
  2346 		{
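       		// Note: aPageList doubles as a tagged value here - if the low bit is
       		// set it is not a pointer to a list but the physical base address of a
       		// contiguous run (advanced by one page each iteration); otherwise it
       		// points to an array of per-page physical addresses.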
       
  2347 		TPhysAddr pa;
       
  2348 		if((TInt)aPageList&1)
       
  2349 			{
       
  2350 			pa = (TPhysAddr)aPageList&~1;
       
  2351 			*(TPhysAddr*)&aPageList += iPageSize;
       
  2352 			}
       
  2353 		else
       
  2354 			pa = *aPageList++;
       
  2355 		
       
  2356 		*iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
       
  2357 		CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
       
  2358 		InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
       
  2359 		memset((TAny*)iTempAddr, aClearByte, iPageSize);
       
  2360 		// This temporary mapping is non-cached, so there is no need to flush the cache here.
       
  2361 		// However, we still have to make sure that the write buffer(s) are drained.
       
  2362 		CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, iPageSize, EMapAttrBufferedC);
       
  2363 		}
       
  2364 	*iTempPte=0;
       
  2365 	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
       
  2366 	InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
       
  2367 	}
       
  2368 
       
  2369 
       
  2370 /**
       
  2371 Create a temporary mapping of one or more contiguous physical pages.
       
  2372 Fully cached memory attributes apply.
       
  2373 The RamAllocatorMutex must be held before this function is called and not released
       
  2374 until after UnmapTemp has been called.
       
  2375 
       
  2376 @param aPage	The physical address of the pages to be mapped.
       
  2377 @param aLinAddr The linear address of any existing location where the page is mapped.
       
  2378 				If the page isn't already mapped elsewhere as a cacheable page then
       
  2379 				this value is irrelevant. (It is used for page colouring.)
       
  2380 @param aPages	Number of pages to map.
       
  2381 
       
  2382 @return The linear address of where the pages have been mapped.
       
  2383 */
       
  2384 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
       
  2385 	{
       
  2386 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  2387 	__ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
       
  2388 	iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
       
  2389 	iTempMapCount = aPages;
       
  2390 	if (aPages==1)
       
  2391 		{
       
  2392 		iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
       
  2393 		CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
       
  2394 		}
       
  2395 	else
       
  2396 		{
       
  2397 		__ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
       
  2398 		for (TInt i=0; i<aPages; i++)
       
  2399 			iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);	
       
  2400 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
       
  2401 		}
       
  2402 	return iTempAddr+(iTempMapColor<<KPageShift);
       
  2403 	}
       
  2404 
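       // A minimal sketch of the page-colouring rule used by the temporary
       // mappings above, assuming only KPageShift/KPageColourMask from this file;
       // ExampleTempAddress is an illustrative name, not a kernel API. The colour
       // is taken from the page's existing linear address so the temporary
       // virtual address falls on the same cache alias (colour) as any existing
       // cached mapping of the page.
       #if 0	// illustrative sketch, not compiled
       static TLinAddr ExampleTempAddress(TLinAddr aTempBase, TLinAddr aExistingLin)
       	{
       	TUint colour = (aExistingLin>>KPageShift)&KPageColourMask;	// 0..KPageColourCount-1
       	return aTempBase + (colour<<KPageShift);
       	}
       #endif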
       
  2405 /**
       
  2406 Create a temporary mapping of one or more contiguous physical pages.
       
  2407 Memory attributes as specified by aMemType apply.
       
  2408 @see ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) for other details.
       
  2409 */
       
  2410 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType aMemType)
       
  2411 	{
       
  2412 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  2413 	__ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
       
  2414 	iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
       
  2415 	iTempMapCount = aPages;
       
  2416 	TUint pte = SP_PTE(KArmV6PermRWNO, aMemType, 0, 1);
       
  2417 	if (aPages==1)
       
  2418 		{
       
  2419 		iTempPte[iTempMapColor] = (aPage&~KPageMask) | pte;	// apply the precomputed PTE for aMemType
       
  2420 		CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
       
  2421 		}
       
  2422 	else
       
  2423 		{
       
  2424 		__ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
       
  2425 		for (TInt i=0; i<aPages; i++)
       
  2426 			iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | pte;	// apply the precomputed PTE for aMemType
       
  2427 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
       
  2428 		}
       
  2429 	return iTempAddr+(iTempMapColor<<KPageShift);
       
  2430 	}
       
  2431 
       
  2432 /**
       
  2433 Create a temporary mapping of one or more contiguous physical pages, distinct from
       
  2434 that created by MapTemp.
       
  2435 The RamAllocatorMutex must be held before this function is called and not released
       
  2436 until after UnmapSecondTemp has been called.
       
  2437 
       
  2438 @param aPage	The physical address of the pages to be mapped.
       
  2439 @param aLinAddr The linear address of any existing location where the page is mapped.
       
  2440 				If the page isn't already mapped elsewhere as a cacheable page then
       
  2441 				this value is irrelevant. (It is used for page colouring.)
       
  2442 @param aPages	Number of pages to map.
       
  2443 
       
  2444 @return The linear address of where the pages have been mapped.
       
  2445 */
       
  2446 TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
       
  2447 	{
       
  2448 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  2449 	__ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
       
  2450 	iSecondTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
       
  2451 	iSecondTempMapCount = aPages;
       
  2452 	if (aPages==1)
       
  2453 		{
       
  2454 		iSecondTempPte[iSecondTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
       
  2455 		CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor));
       
  2456 		}
       
  2457 	else
       
  2458 		{
       
  2459 		__ASSERT_DEBUG(iSecondTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
       
  2460 		for (TInt i=0; i<aPages; i++)
       
  2461 			iSecondTempPte[iSecondTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);	
       
  2462 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor), aPages*sizeof(TPte));
       
  2463 		}
       
  2464 	return iSecondTempAddr+(iSecondTempMapColor<<KPageShift);
       
  2465 	}
       
  2466 
       
  2467 /**
       
  2468 Remove the temporary mapping created with MapTemp.
       
  2469 */
       
  2470 void ArmMmu::UnmapTemp()
       
  2471 	{
       
  2472 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  2473 	for (TInt i=0; i<iTempMapCount; i++)
       
  2474 		{
       
  2475 		iTempPte[iTempMapColor+i] = 0;
       
  2476 		CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor+i));
       
  2477 		InvalidateTLBForPage(iTempAddr+((iTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
       
  2478 		}
       
  2479 	}
       
  2480 
       
  2481 /**
       
  2482 Remove the temporary mapping created with MapSecondTemp.
       
  2483 */
       
  2484 void ArmMmu::UnmapSecondTemp()
       
  2485 	{
       
  2486 	__ASSERT_MUTEX(RamAllocatorMutex);
       
  2487 	for (TInt i=0; i<iSecondTempMapCount; i++)
       
  2488 		{
       
  2489 		iSecondTempPte[iSecondTempMapColor+i] = 0;
       
  2490 		CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor+i));
       
  2491 		InvalidateTLBForPage(iSecondTempAddr+((iSecondTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
       
  2492 		}
       
  2493 	}
       
  2494 
       
  2495 
       
  2496 TBool ArmMmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite)
       
  2497 	{
       
  2498 	__NK_ASSERT_DEBUG(aSize<=KChunkSize);
       
  2499 	TLinAddr end = aAddr+aSize-1;
       
  2500 	if(end<aAddr)
       
  2501 		end = ~0u;
       
  2502 
       
  2503 	if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize))
       
  2504 		{
       
  2505 		// local address is in alias region.
       
  2506 		// remove alias...
       
  2507 		NKern::LockSystem();
       
  2508 		((DMemModelThread*)TheCurrentThread)->RemoveAlias();
       
  2509 		NKern::UnlockSystem();
       
  2510 		// access memory, which will cause an exception...
       
  2511 		if(!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)))
       
  2512 			aAddr = end;
       
  2513 		InvalidateTLBForPage(aAddr,((DMemModelProcess*)TheCurrentThread->iOwningProcess)->iOsAsid);
       
  2514 		if(aWrite)
       
  2515 			*(volatile TUint8*)aAddr = 0;
       
  2516 		else
       
  2517 			aWrite = *(volatile TUint8*)aAddr;
       
  2518 		// can't get here
       
  2519 		__NK_ASSERT_DEBUG(0);
       
  2520 		}
       
  2521 
       
  2522 	TUint32 local_mask;
       
  2523 	DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
       
  2524 	if(aWrite)
       
  2525 		local_mask = process->iAddressCheckMaskW;
       
  2526 	else
       
  2527 		local_mask = process->iAddressCheckMaskR;
       
  2528 	TUint32 mask = 2<<(end>>27);
       
  2529 	mask -= 1<<(aAddr>>27);
       
  2530 	if((local_mask&mask)!=mask)
       
  2531 		return EFalse;
       
  2532 
       
  2533 	if(!aWrite)
       
  2534 		return ETrue; // reads are ok
       
  2535 
       
  2536 	// writes need further checking...
       
  2537 	TLinAddr userCodeStart = iUserCodeBase;
       
  2538 	TLinAddr userCodeEnd = userCodeStart+iMaxUserCodeSize;
       
  2539 	if(end>=userCodeStart && aAddr<userCodeEnd)
       
  2540 		return EFalse; // trying to write to user code area
       
  2541 
       
  2542 	return ETrue;
       
  2543 	}
       
  2544 
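       // Worked example of the 128MB-region check used above: each bit of
       // iAddressCheckMaskR/W covers one 128MB slice of the address space
       // (index = address>>27). For aAddr=0x67FFF000, aSize=0x2000 the range ends
       // at end=0x68000FFF, so aAddr>>27 = 12 and end>>27 = 13, giving
       // mask = (2<<13)-(1<<12) = 0x3000 (bits 12 and 13). The access is allowed
       // only if both bits are set in the process's mask.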
       
  2545 TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize)
       
  2546 //
       
  2547 // Set up an alias mapping starting at address aAddr in specified process.
       
  2548 // Check permissions aPerm.
       
  2549 // Enter and return with system locked.
       
  2550 // Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
       
  2551 //
       
  2552 	{
       
  2553 	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm));
       
  2554 	__ASSERT_SYSTEM_LOCK
       
  2555 
       
  2556 	if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize))
       
  2557 		return KErrBadDescriptor; // prevent access to alias region
       
  2558 
       
  2559 	ArmMmu& m=::TheMmu;
       
  2560 
       
  2561 	// check if memory is in region which is safe to access with supervisor permissions...
       
  2562 	TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0;
       
  2563 	if(!okForSupervisorAccess)
       
  2564 		{
       
  2565 		TInt shift = aAddr>>27;
       
  2566 		if(!(aPerm&EMapAttrWriteUser))
       
  2567 			{
       
  2568 			// reading with user permissions...
       
  2569 			okForSupervisorAccess = (aProcess->iAddressCheckMaskR>>shift)&1;
       
  2570 			}
       
  2571 		else
       
  2572 			{
       
  2573 			// writing with user permissions...
       
  2574 			okForSupervisorAccess = (aProcess->iAddressCheckMaskW>>shift)&1;
       
  2575 			if(okForSupervisorAccess)
       
  2576 				{
       
  2577 				// check for user code, because this is supervisor r/w and so
       
  2578 				// is not safe to write to with supervisor permissions.
       
  2579 				if(TUint(aAddr-m.iUserCodeBase)<TUint(m.iMaxUserCodeSize))
       
  2580 					return KErrBadDescriptor; // prevent write to this...
       
  2581 				}
       
  2582 			}
       
  2583 		}
       
  2584 
       
  2585 	TInt pdeIndex = aAddr>>KChunkShift;
       
  2586 	if(pdeIndex>=(m.iLocalPdSize>>2))
       
  2587 		{
       
  2588 		// address is in global section, don't bother aliasing it...
       
  2589 		if(iAliasLinAddr)
       
  2590 			RemoveAlias();
       
  2591 		aAliasAddr = aAddr;
       
  2592 		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
       
  2593 		aAliasSize = aSize<maxSize ? aSize : maxSize;
       
  2594 		__KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() abandoned as memory is globally mapped"));
       
  2595 		return okForSupervisorAccess;
       
  2596 		}
       
  2597 
       
  2598 	TInt asid = aProcess->iOsAsid;
       
  2599 	TPde* pd = PageDirectory(asid);
       
  2600 	TPde pde = pd[pdeIndex];
       
  2601 	if ((TPhysAddr)(pde&~KPageMask) == AliasRemapOld)
       
  2602 		pde = AliasRemapNew|(pde&KPageMask);
       
  2603 	pde = (pde&~(0xf<<5))|(KIPCAliasDomain<<5); // change domain for PDE
       
  2604 	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
       
  2605 	if(pde==iAliasPde && iAliasLinAddr)
       
  2606 		{
       
  2607 		// pde already aliased, so just update linear address...
       
  2608 		iAliasLinAddr = aliasAddr;
       
  2609 		}
       
  2610 	else
       
  2611 		{
       
  2612 		// alias PDE changed...
       
  2613 		iAliasPde = pde;
       
  2614 		iAliasOsAsid = asid;
       
  2615 		if(!iAliasLinAddr)
       
  2616 			{
       
  2617 			ArmMmu::UnlockAlias();
       
  2618 			::TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
       
  2619 			}
       
  2620 		iAliasLinAddr = aliasAddr;
       
  2621 		*iAliasPdePtr = pde;
       
  2622 		CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
       
  2623 		}
       
  2624 
       
  2625 	__KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
       
  2626 	InvalidateTLBForPage(aliasAddr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
       
  2627 	TInt offset = aAddr&KPageMask;
       
  2628 	aAliasAddr = aliasAddr | offset;
       
  2629 	TInt maxSize = KPageSize - offset;
       
  2630 	aAliasSize = aSize<maxSize ? aSize : maxSize;
       
  2631 	iAliasTarget = aAddr & ~KPageMask;
       
  2632 	return okForSupervisorAccess;
       
  2633 	}
       
  2634 
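       // Note on the alias window set up above: the copied PDE maps the whole 1MB
       // chunk containing aAddr at KIPCAlias, but aAliasAddr/aAliasSize hand back
       // only as far as the end of the current page, so callers are expected to
       // re-alias as they advance. Because the page-within-chunk bits of aAddr are
       // preserved in aliasAddr, the alias keeps the same page colour as the
       // original mapping.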
       
  2635 void DMemModelThread::RemoveAlias()
       
  2636 //
       
  2637 // Remove alias mapping (if present)
       
  2638 // Enter and return with system locked.
       
  2639 //
       
  2640 	{
       
  2641 	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
       
  2642 	__ASSERT_SYSTEM_LOCK
       
  2643 	TLinAddr addr = iAliasLinAddr;
       
  2644 	if(addr)
       
  2645 		{
       
  2646 		ArmMmu::LockAlias();
       
  2647 		iAliasLinAddr = 0;
       
  2648 		iAliasPde = 0;
       
  2649 		*iAliasPdePtr = 0;
       
  2650 		CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
       
  2651 		InvalidateTLBForPage(addr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
       
  2652 		iAliasLink.Deque();
       
  2653 		}
       
  2654 	}
       
  2655 
       
  2656 /*
       
  2657  * Performs cache maintenance for a physical page that is going to be reused.
       
  2658  * Fully cached attributes are assumed. 
       
  2659  */
       
  2660 void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr a)
       
  2661 	{
       
  2662 	// purge a single page from the cache following decommit
       
  2663 	ArmMmu& m=::TheMmu;
       
  2664 	TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
       
  2665 	TPte& pte=m.iTempPte[colour];
       
  2666 	TLinAddr va=m.iTempAddr+(colour<<KPageShift);
       
  2667 	pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
       
  2668 	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
       
  2669 
       
  2670 	CacheMaintenance::PageToReuse(va,EMemAttNormalCached, a);
       
  2671 
       
  2672 	pte=0;
       
  2673 	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
       
  2674 	InvalidateTLBForPage(va,KERNEL_MAPPING);
       
  2675 	}
       
  2676 
       
  2677 void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* al, TInt n)
       
  2678 	{
       
  2679 	// purge a list of pages from the cache following decommit
       
  2680 	while (--n>=0)
       
  2681 		ArmMmu::CacheMaintenanceOnDecommit(*al++);
       
  2682 	}
       
  2683 
       
  2684 /*
       
  2685  * Performs cache maintenance to preserve a physical page that is going to be reused.
       
  2686  */
       
  2687 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr a, TUint aMapAttr)
       
  2688 	{
       
  2689 	// purge a single page from the cache following decommit
       
  2690 	ArmMmu& m=::TheMmu;
       
  2691 	TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
       
  2692 	TPte& pte=m.iTempPte[colour];
       
  2693 	TLinAddr va=m.iTempAddr+(colour<<KPageShift);
       
  2694 	pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
       
  2695 	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
       
  2696 
       
  2697 	CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
       
  2698 
       
  2699 	pte=0;
       
  2700 	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
       
  2701 	InvalidateTLBForPage(va,KERNEL_MAPPING);
       
  2702 	}
       
  2703 
       
  2704 void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr* al, TInt n, TUint aMapAttr)
       
  2705 	{
       
  2706 	// purge a list of pages from the cache following decommit
       
  2707 	while (--n>=0)
       
  2708 		ArmMmu::CacheMaintenanceOnPreserve(*al++, aMapAttr);
       
  2709 	}
       
  2710 
       
  2711 /*
       
  2712  * Performs cache maintenance of physical memory that has been decommitted and has to be preserved.
       
  2713  * Call this method for physical pages whose page info has not been updated (or which have no page info at all).
       
  2714  * @arg aPhysAddr	The address of contiguous physical memory to be preserved.
       
  2715  * @arg aSize		The size of the region
       
  2716  * @arg aLinAddr 	Former linear address of the region. As noted above, the physical memory has
       
  2717  * 					already been unmapped from this linear address.
       
  2718  * @arg aMapAttr 	Mapping attributes of the region when it was mapped in aLinAddr.
       
  2719  * @pre MMU mutex is held.  
       
  2720  */
       
  2721 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr aPhysAddr, TInt aSize, TLinAddr aLinAddr, TUint aMapAttr )
       
  2722 	{
       
  2723 	__NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
       
  2724 	__NK_ASSERT_DEBUG((aSize&KPageMask)==0);
       
  2725 	__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
       
  2726 
       
  2727 	TPhysAddr pa = aPhysAddr;
       
  2728 	TInt size = aSize;
       
  2729 	TInt colour = (aLinAddr>>KPageShift)&KPageColourMask;
       
  2730 	TPte* pte = &(iTempPte[colour]);
       
  2731 	while (size)
       
  2732 		{
       
  2733 		pte=&(iTempPte[colour]);
       
  2734 		TLinAddr va=iTempAddr+(colour<<KPageShift);
       
  2735 		*pte=pa|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
       
  2736 		CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
       
  2737 		CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
       
  2738 
       
  2739 		*pte=0;
       
  2740 		CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
       
  2741 		InvalidateTLBForPage(va,KERNEL_MAPPING);
       
  2742 
       
  2743 		colour = (colour+1)&KPageColourMask;
       
  2744 		pa += KPageSize;
       
  2745 		size -=KPageSize;
       
  2746 		}
       
  2747 	}
       
  2748 
       
  2749 TInt ArmMmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
       
  2750 	{
       
  2751 	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
       
  2752 	TInt page = aLinAddr>>KPageShift;
       
  2753 	NKern::LockSystem();
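       	// The walk below donates pages in runs: each run is clipped to the end of
       	// the current page table (KChunkSize>>KPageShift entries) and to KMaxPages,
       	// so the system lock can be flashed regularly between runs.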
       
  2754 	for(;;)
       
  2755 		{
       
  2756 		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
       
  2757 		TPte* pt = SafePageTableFromPde(*pd++);
       
  2758 		TInt pteIndex = page&(KChunkMask>>KPageShift);
       
  2759 		if(!pt)
       
  2760 			{
       
  2761 			// whole page table has gone, so skip all pages in it...
       
  2762 			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
       
  2763 			aNumPages -= pagesInPt;
       
  2764 			page += pagesInPt;
       
  2765 			if(aNumPages>0)
       
  2766 				continue;
       
  2767 			NKern::UnlockSystem();
       
  2768 			return KErrNone;
       
  2769 			}
       
  2770 		pt += pteIndex;
       
  2771 		do
       
  2772 			{
       
  2773 			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
       
  2774 			if(pagesInPt>aNumPages)
       
  2775 				pagesInPt = aNumPages;
       
  2776 			if(pagesInPt>KMaxPages)
       
  2777 				pagesInPt = KMaxPages;
       
  2778 
       
  2779 			aNumPages -= pagesInPt;
       
  2780 			page += pagesInPt;
       
  2781 
       
  2782 			do
       
  2783 				{
       
  2784 				TPte pte = *pt++;
       
  2785 				if(pte) // pte may be null if page has already been unlocked and reclaimed by system
       
  2786 					iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
       
  2787 				}
       
  2788 			while(--pagesInPt);
       
  2789 
       
  2790 			if(!aNumPages)
       
  2791 				{
       
  2792 				NKern::UnlockSystem();
       
  2793 				return KErrNone;
       
  2794 				}
       
  2795 
       
  2796 			pteIndex = page&(KChunkMask>>KPageShift);
       
  2797 			}
       
  2798 		while(!NKern::FlashSystem() && pteIndex);
       
  2799 		}
       
  2800 	}
       
  2801 
       
  2802 
       
  2803 TInt ArmMmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
       
  2804 	{
       
  2805 	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
       
  2806 	TInt page = aLinAddr>>KPageShift;
       
  2807 	NKern::LockSystem();
       
  2808 	for(;;)
       
  2809 		{
       
  2810 		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
       
  2811 		TPte* pt = SafePageTableFromPde(*pd++);
       
  2812 		TInt pteIndex = page&(KChunkMask>>KPageShift);
       
  2813 		if(!pt)
       
  2814 			goto not_found;
       
  2815 		pt += pteIndex;
       
  2816 		do
       
  2817 			{
       
  2818 			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
       
  2819 			if(pagesInPt>aNumPages)
       
  2820 				pagesInPt = aNumPages;
       
  2821 			if(pagesInPt>KMaxPages)
       
  2822 				pagesInPt = KMaxPages;
       
  2823 
       
  2824 			aNumPages -= pagesInPt;
       
  2825 			page += pagesInPt;
       
  2826 
       
  2827 			do
       
  2828 				{
       
  2829 				TPte pte = *pt++;
       
  2830 				if(pte==0)
       
  2831 					goto not_found;
       
  2832 				if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
       
  2833 					goto not_found;
       
  2834 				}
       
  2835 			while(--pagesInPt);
       
  2836 
       
  2837 			if(!aNumPages)
       
  2838 				{
       
  2839 				NKern::UnlockSystem();
       
  2840 				return KErrNone;
       
  2841 				}
       
  2842 
       
  2843 			pteIndex = page&(KChunkMask>>KPageShift);
       
  2844 			}
       
  2845 		while(!NKern::FlashSystem() && pteIndex);
       
  2846 		}
       
  2847 not_found:
       
  2848 	NKern::UnlockSystem();
       
  2849 	return KErrNotFound;
       
  2850 	}
       
  2851 
       
  2852 
       
  2853 void RamCache::SetFree(SPageInfo* aPageInfo)
       
  2854 	{
       
  2855 	ArmMmu& m=::TheMmu;
       
  2856 	// Make a page free
       
  2857 	SPageInfo::TType type = aPageInfo->Type();
       
  2858 	if(type==SPageInfo::EPagedCache)
       
  2859 		{
       
  2860 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  2861 		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
       
  2862 		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
       
  2863 		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
       
  2864 		TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
       
  2865 		TPte* pt = PtePtrFromLinAddr(lin,asid);
       
  2866 		TPhysAddr phys = (*pt)&~KPageMask;
       
  2867 		*pt = KPteNotPresentEntry;
       
  2868 		CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  2869 		InvalidateTLBForPage(lin,asid);
       
  2870 		m.CacheMaintenanceOnDecommit(phys);
       
  2871 
       
  2872 		// actually decommit it from chunk...
       
  2873 		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
       
  2874 		SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
       
  2875 		if(!--ptinfo.iCount)
       
  2876 			{
       
  2877 			chunk->iPageTables[offset>>KChunkShift] = 0xffff;
       
  2878 			NKern::UnlockSystem();
       
  2879 			((ArmMmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
       
  2880 			((ArmMmu*)iMmu)->FreePageTable(ptid);
       
  2881 			NKern::LockSystem();
       
  2882 			}
       
  2883 		}
       
  2884 	else
       
  2885 		{
       
  2886 		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
       
  2887 		Panic(EUnexpectedPageType);
       
  2888 		}
       
  2889 	}
       
  2890 
       
  2891 
       
  2892 //
       
  2893 // MemModelDemandPaging
       
  2894 //
       
  2895 
       
  2896 class MemModelDemandPaging : public DemandPaging
       
  2897 	{
       
  2898 public:
       
  2899 	// From RamCacheBase
       
  2900 	virtual void Init2();
       
  2901 	virtual TInt Init3();
       
  2902 	virtual TBool PageUnmapped(SPageInfo* aPageInfo);
       
  2903 	// From DemandPaging
       
  2904 	virtual TInt Fault(TAny* aExceptionInfo);
       
  2905 	virtual void SetOld(SPageInfo* aPageInfo);
       
  2906 	virtual void SetFree(SPageInfo* aPageInfo);
       
  2907 	virtual void NotifyPageFree(TPhysAddr aPage);
       
  2908 	virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
       
  2909 	virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
       
  2910 	virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
       
  2911 	virtual TInt PageState(TLinAddr aAddr);
       
  2912 	virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
       
  2913 	// New
       
  2914 	inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
       
  2915 	void InitRomPaging();
       
  2916 	void InitCodePaging();
       
  2917 	TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid);
       
  2918 	TInt PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory);
       
  2919 public:
       
  2920 	// use of the following members is protected by the system lock...
       
  2921 	TPte* iPurgePte;			// PTE used for temporary mappings during cache purge operations
       
  2922 	TLinAddr iPurgeAddr;		// address corresponding to iPurgePte
       
  2923 	};
       
  2924 
       
  2925 extern void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr);
       
  2926 extern void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid);
       
  2927 
       
  2928 //
       
  2929 // MemModelDemandPaging
       
  2930 //
       
  2931 
       
  2932 
       
  2933 DemandPaging* DemandPaging::New()
       
  2934 	{
       
  2935 	return new MemModelDemandPaging();
       
  2936 	}
       
  2937 
       
  2938 
       
  2939 void MemModelDemandPaging::Init2()
       
  2940 	{
       
  2941 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
       
  2942 	DemandPaging::Init2();
       
  2943 
       
  2944 	iPurgeAddr = KDemandPagingTempAddr;
       
  2945 	iPurgePte = PtePtrFromLinAddr(iPurgeAddr);
       
  2946 
       
  2947 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
       
  2948 	}
       
  2949 
       
  2950 
       
  2951 void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
       
  2952 	{
       
  2953 	aReq.iLoadAddr = iTempPages + aReqId * KPageSize * KPageColourCount;
       
  2954 	aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
       
  2955 	}
       
  2956 
       
  2957 
       
  2958 TInt MemModelDemandPaging::Init3()
       
  2959 	{
       
  2960 	TInt r=DemandPaging::Init3();
       
  2961 	if(r!=KErrNone)
       
  2962 		return r;
       
  2963 	
       
  2964 	// Create a region for mapping pages during page in
       
  2965 	DPlatChunkHw* chunk;
       
  2966 	TInt chunkSize = (KMaxPagingDevices * KPagingRequestsPerDevice + 1) * KPageColourCount * KPageSize;
       
  2967 	DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
       
  2968 	if(!chunk)
       
  2969 		Panic(EInitialiseFailed);
       
  2970 	TInt colourMask = KPageColourMask << KPageShift;
       
  2971 	iTempPages = (chunk->iLinAddr + colourMask) & ~colourMask;
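       	// The "+1" in chunkSize above reserves one spare colour-set of pages:
       	// rounding iTempPages up to a page-colour boundary here can consume up to
       	// colourMask bytes of the chunk.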
       
  2972 
       
  2973 	if(RomPagingRequested())
       
  2974 		InitRomPaging();
       
  2975 
       
  2976 	if (CodePagingRequested())
       
  2977 		InitCodePaging();
       
  2978 
       
  2979 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
       
  2980 	return KErrNone;
       
  2981 	}
       
  2982 	
       
  2983 void MemModelDemandPaging::InitRomPaging()
       
  2984 	{
       
  2985 	// Make page tables for demand paged part of ROM...
       
  2986 	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
       
  2987 	TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk with paged ROM in
       
  2988 	TLinAddr linEnd = iRomLinearBase+iRomSize;
       
  2989 	while(lin<linEnd)
       
  2990 		{
       
  2991 		// Get a Page Table
       
  2992 		TInt ptid = Mmu().PageTableId(lin,0);
       
  2993 		if(ptid<0)
       
  2994 			{
       
  2995 			MmuBase::Wait();
       
  2996 			ptid = Mmu().AllocPageTable();
       
  2997 			MmuBase::Signal();
       
  2998 			__NK_ASSERT_DEBUG(ptid>=0);
       
  2999 			Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
       
  3000 			}
       
  3001 
       
  3002 		// Get new page table addresses
       
  3003 		TPte* pt = PageTable(ptid);
       
  3004 		TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt,0);
       
  3005 
       
  3006 		// Pointer to page directory entry
       
  3007 		TPde* ppde = ::InitPageDirectory + (lin>>KChunkShift);
       
  3008 
       
  3009 		// Fill in Page Table
       
  3010 		TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
       
  3011 		pt += (lin&KChunkMask)>>KPageShift;
       
  3012 		TLinAddr firstPte = (TLinAddr)pt; // Will need this to clean page table memory region from cache
       
  3013 
       
  3014 		do
       
  3015 			{
       
  3016 			if(lin<iRomPagedLinearBase)
       
  3017 				*pt++ = Mmu().LinearToPhysical(lin,0) | KRomPtePerm;
       
  3018 			else
       
  3019 				{
       
  3020 				MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
       
  3021 				++pt;
       
  3022 				}
       
  3023 			lin += KPageSize;
       
  3024 			}
       
  3025 		while(pt<ptEnd && lin<=linEnd);
       
  3026 
       
  3027 		CacheMaintenance::MultiplePtesUpdated((TLinAddr)firstPte, (TUint)pt-firstPte);
       
  3028 
       
  3029 		// Add new Page Table to the Page Directory
       
  3030 		TPde newpde = ptPhys | KShadowPdePerm;
       
  3031 		__KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
       
  3032 		TInt irq=NKern::DisableAllInterrupts();
       
  3033 		*ppde = newpde;
       
  3034 		CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
       
  3035 		FlushTLBs();
       
  3036 		NKern::RestoreInterrupts(irq);
       
  3037 		}
       
  3038 	}
       
  3039 
       
  3040 
       
  3041 void MemModelDemandPaging::InitCodePaging()
       
  3042 	{
       
  3043 	// Initialise code paging info
       
  3044 	iCodeLinearBase = Mmu().iUserCodeBase;
       
  3045 	iCodeSize = Mmu().iMaxUserCodeSize;
       
  3046 	}
       
  3047 
       
  3048 
       
  3049 /**
       
  3050 @return ETrue when the unmapped page should be freed, EFalse otherwise
       
  3051 */
       
  3052 TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
       
  3053 	{
       
  3054 	SPageInfo::TType type = aPageInfo->Type();
       
  3055 
       
  3056 	// Only have to deal with cache pages - pages containing code don't get returned to the system
       
  3057 	// when they are decommitted from an individual process, only when the code segment is destroyed	
       
  3058 	if(type!=SPageInfo::EPagedCache)
       
  3059 		{
       
  3060 		__NK_ASSERT_DEBUG(type!=SPageInfo::EPagedCode); // shouldn't happen
       
  3061 		__NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
       
  3062 		return ETrue;
       
  3063 		}
       
  3064 
       
  3065 	RemovePage(aPageInfo);
       
  3066 	AddAsFreePage(aPageInfo);
       
  3067 	// Return false to stop DMemModelChunk::DoDecommit from freeing this page
       
  3068 	return EFalse; 
       
  3069 	}
       
  3070 
       
  3071 
       
  3072 void DoSetCodeOld(SPageInfo* aPageInfo, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
       
  3073 	{
       
  3074 	NThread* currentThread = NKern::CurrentThread(); 
       
  3075 	aPageInfo->SetModifier(currentThread);
       
  3076 	// scan all address spaces...
       
  3077 	TInt asid = -1;
       
  3078 	TInt lastAsid = KArmV6NumAsids-1;
       
  3079 	TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
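       	// The loop below scans the ASID bitmap one 32-bit word at a time, MSB
       	// first (bits<<=1). After each word, "asid |= 31" rounds asid up so the
       	// next ++asid starts the following word, and FlashSystem() lets other
       	// threads run; if the page is modified meanwhile, CheckModified() spots
       	// it and the scan is abandoned.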
       
  3080 	do
       
  3081 		{
       
  3082 		TUint32 bits = *ptr++;
       
  3083 		do
       
  3084 			{
       
  3085 			++asid;
       
  3086 			if(bits&0x80000000u)
       
  3087 				{
       
  3088 				// codeseg is mapped in this address space, so update PTE...
       
  3089 				TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
       
  3090 				TPte pte = *pt;
       
  3091 				if(pte&KPtePresentMask)
       
  3092 					{
       
  3093 					__NK_ASSERT_DEBUG((pte&~KPageMask) == aPageInfo->PhysAddr());
       
  3094 					MakePTEInaccessible(pt, pte&~KPtePresentMask, aLinAddr, asid);
       
  3095 					}
       
  3096 				}
       
  3097 			}
       
  3098 		while(bits<<=1);
       
  3099 		if(NKern::FlashSystem() && aPageInfo->CheckModified(currentThread))
       
  3100 			return; // page was modified by another thread
       
  3101 		asid |= 31;
       
  3102 		}
       
  3103 	while(asid<lastAsid);
       
  3104 	}
       
  3105 
       
  3106 
       
  3107 void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
       
  3108 	{
       
  3109 	__ASSERT_SYSTEM_LOCK;
       
  3110 	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);
       
  3111 
       
  3112 	SPageInfo::TType type = aPageInfo->Type();
       
  3113 
       
  3114 	if(type==SPageInfo::EPagedROM)
       
  3115 		{
       
  3116 		// get linear address of page...
       
  3117 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  3118 		__NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
       
  3119 
       
  3120 		// make page inaccessible...
       
  3121 		TLinAddr lin = iRomLinearBase+offset;
       
  3122 		TPte* pt = PtePtrFromLinAddr(lin);
       
  3123 		MakeGlobalPTEInaccessible(pt, *pt&~KPtePresentMask, lin);
       
  3124 		}
       
  3125 	else if(type==SPageInfo::EPagedCode)
       
  3126 		{
       
  3127 		START_PAGING_BENCHMARK;
       
  3128 
       
  3129 		// get linear address of page...
       
  3130 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  3131 		__NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
       
  3132 		TLinAddr lin = iCodeLinearBase+offset;
       
  3133 			
       
  3134 		// get CodeSegMemory...
       
  3135 		DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
       
  3136 		__NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
       
  3137 
       
  3138 #ifdef _DEBUG
       
  3139 		TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
       
  3140 		__NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
       
  3141 #endif
       
  3142 
       
  3143 		// make page inaccessible...
       
  3144 		DoSetCodeOld(aPageInfo,codeSegMemory,lin);
       
  3145 		
       
  3146 		END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
       
  3147 		}
       
  3148 	else if(type==SPageInfo::EPagedCache)
       
  3149 		{
       
  3150 		// leave page accessible
       
  3151 		}
       
  3152 	else if(type!=SPageInfo::EPagedFree)
       
  3153 		{
       
  3154 		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
       
  3155 		Panic(EUnexpectedPageType);
       
  3156 		}
       
  3157 	NKern::FlashSystem();
       
  3158 	}
       
  3159 
       
  3160 
       
  3161 void DoSetCodeFree(SPageInfo* aPageInfo, TPhysAddr aPhysAddr, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
       
  3162 	{
       
  3163 	NThread* currentThread = NKern::CurrentThread();
       
  3164 	aPageInfo->SetModifier(currentThread);
       
  3165 	// scan all address spaces...
       
  3166 	TInt asid = -1;
       
  3167 	TInt lastAsid = KArmV6NumAsids-1;
       
  3168 	TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
       
  3169 	do
       
  3170 		{
       
  3171 		TUint32 bits = *ptr++;
       
  3172 		do
       
  3173 			{
       
  3174 			++asid;
       
  3175 			if(bits&0x80000000u)
       
  3176 				{
       
  3177 				// codeseg is mapped in this address space, so update PTE...
       
  3178 				TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
       
  3179 				TPte pte = *pt;
       
  3180 				if (pte!=KPteNotPresentEntry && (pte&~KPageMask) == aPhysAddr)
       
  3181 					MakePTEInaccessible(pt, KPteNotPresentEntry, aLinAddr, asid);
       
  3182 				}
       
  3183 			}
       
  3184 		while(bits<<=1);
       
  3185 		if(NKern::FlashSystem())
       
  3186 			{
       
  3187 			// nobody else should modify page!
       
  3188 			__NK_ASSERT_DEBUG(!aPageInfo->CheckModified(currentThread));
       
  3189 			}
       
  3190 		asid |= 31;
       
  3191 		}
       
  3192 	while(asid<lastAsid);
       
  3193 	}
       
  3194 
       
  3195 
       
  3196 void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
       
  3197 	{
       
  3198 	__ASSERT_SYSTEM_LOCK;
       
  3199 	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
       
  3200 	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
       
  3201 	if(aPageInfo->LockCount())
       
  3202 		Panic(ERamPageLocked);
       
  3203 
       
  3204 	SPageInfo::TType type = aPageInfo->Type();
       
  3205 	TPhysAddr phys = aPageInfo->PhysAddr();
       
  3206 
       
  3207 	if(type==SPageInfo::EPagedROM)
       
  3208 		{
       
  3209 		// get linear address of page...
       
  3210 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  3211 		__NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
       
  3212 		TLinAddr lin = iRomLinearBase+offset;
       
  3213 
       
  3214 		// unmap it...
       
  3215 		TPte* pt = PtePtrFromLinAddr(lin);
       
  3216 		MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
       
  3217 
       
  3218 #ifdef BTRACE_PAGING
       
  3219 		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutROM,phys,lin);
       
  3220 #endif
       
  3221 		}
       
  3222 	else if(type==SPageInfo::EPagedCode)
       
  3223 		{
       
  3224 		START_PAGING_BENCHMARK;
       
  3225 		
       
  3226 		// get linear address of page...
       
  3227 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  3228 		__NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
       
  3229 		TLinAddr lin = iCodeLinearBase+offset;
       
  3230 
       
  3231 		// get CodeSegMemory...
       
  3232 		// NOTE, this cannot die because we hold the RamAlloc mutex, and the CodeSegMemory
       
  3233 		// destructor also needs this mutex to do its cleanup...
       
  3234 		DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
       
  3235 		__NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
       
  3236 
       
  3237 		// remove page from CodeSegMemory (must come before System Lock is released)...
       
  3238 		TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
       
  3239 		__NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
       
  3240 		codeSegMemory->iPages[pageNumber] = KPhysAddrInvalid;
       
  3241 		
       
  3242 		// unmap page from all processes it's mapped into...
       
  3243 		DoSetCodeFree(aPageInfo,phys,codeSegMemory,lin);
       
  3244 
       
  3245 		END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
       
  3246 #ifdef BTRACE_PAGING
       
  3247 		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCode,phys,lin);
       
  3248 #endif
       
  3249 		}
       
  3250 	else if(type==SPageInfo::EPagedCache)
       
  3251 		{
       
  3252 		// get linear address of page...
       
  3253 		TInt offset = aPageInfo->Offset()<<KPageShift;
       
  3254 		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
       
  3255 		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
       
  3256 		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
       
  3257 
       
  3258 		// unmap it...
       
  3259 		TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
       
  3260 		TPte* pt = PtePtrFromLinAddr(lin,asid);
       
  3261 		*pt = KPteNotPresentEntry;
       
  3262 		CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  3263 
       
  3264 		InvalidateTLBForPage(lin,asid);
       
  3265 
       
  3266 		// actually decommit it from chunk...
       
  3267 		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
       
  3268 		SPageTableInfo& ptinfo=Mmu().iPtInfo[ptid];
       
  3269 		if(!--ptinfo.iCount)
       
  3270 			{
       
  3271 			chunk->iPageTables[offset>>KChunkShift] = 0xffff;
       
  3272 			NKern::UnlockSystem();
       
  3273 			Mmu().DoUnassignPageTable(lin, (TAny*)asid);
       
  3274 			Mmu().FreePageTable(ptid);
       
  3275 			NKern::LockSystem();
       
  3276 			}
       
  3277 
       
  3278 #ifdef BTRACE_PAGING
       
  3279 		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
       
  3280 #endif
       
  3281 		}
       
  3282 	else if(type==SPageInfo::EPagedFree)
       
  3283 		{
       
  3284 		// already free...
       
  3285 #ifdef BTRACE_PAGING
       
  3286 		BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
       
  3287 #endif
       
  3288 		// fall through to cache purge code because cache may not have been
       
  3289 		// cleaned for this page if PageUnmapped called
       
  3290 		}
       
  3291 	else
       
  3292 		{
       
  3293 		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
       
  3294 		Panic(EUnexpectedPageType);
       
  3295 		return;
       
  3296 		}
       
  3297 
       
  3298 	NKern::UnlockSystem();
       
  3299 
       
  3300 	// purge cache for page...
       
  3301 	TInt colour = aPageInfo->Offset()&KPageColourMask;
       
  3302 	TPte& pte=iPurgePte[colour];
       
  3303 	TLinAddr va=iPurgeAddr+(colour<<KPageShift);
       
  3304 	pte=phys|SP_PTE(KArmV6PermRWNO, TheMmu.iCacheMaintenanceTempMapAttr, 1, 1);
       
  3305 	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
       
  3306 
       
  3307 	CacheMaintenance::PageToReuse(va,EMemAttNormalCached, KPhysAddrInvalid);
       
  3308 
       
  3309 	pte=0;
       
  3310 	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
       
  3311 	InvalidateTLBForPage(va,KERNEL_MAPPING);
       
  3312 
       
  3313 	NKern::LockSystem();
       
  3314 	}
       
  3315 
       
  3316 
       
  3317 void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
       
  3318 	{
       
  3319 	__KTRACE_OPT(KPAGING, Kern::Printf("MemModelDemandPaging::NotifyPageFree %08x", aPage));
       
  3320 	__ASSERT_SYSTEM_LOCK;
       
  3321 
       
  3322 	SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aPage);
       
  3323 	__ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EPagedCode, MM::Panic(MM::EUnexpectedPageType));
       
  3324 	RemovePage(pageInfo);
       
  3325 	SetFree(pageInfo);
       
  3326 	AddAsFreePage(pageInfo);
       
  3327 	}
       
  3328 
       
  3329 
       
  3330 TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
       
  3331 	{
       
  3332 	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
       
  3333 
       
  3334 	// Get faulting address
       
  3335 	TLinAddr faultAddress = exc.iFaultAddress;
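       	// Fault-status decoding below assumes the ARMv6 FSR layout: bit 11 (WnR)
       	// distinguishes writes on data aborts, and FS bits {10,3:0} (mask 0x40f)
       	// equal to 0x7 indicate a page translation fault.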
       
  3336 	if(exc.iExcCode==EArmExceptionDataAbort)
       
  3337 		{
       
  3338 		// Let writes take an exception rather than page in any memory...
       
  3339 		if(exc.iFaultStatus&(1<<11))
       
  3340 			return KErrUnknown;
       
  3341 		}
       
  3342 	else if (exc.iExcCode != EArmExceptionPrefetchAbort)
       
  3343 		return KErrUnknown; // Not prefetch or data abort
       
  3344 	
       
  3345 	// Only handle page translation faults
       
  3346 	if((exc.iFaultStatus & 0x40f) != 0x7)
       
  3347 		return KErrUnknown;
       
  3348 
       
  3349 	DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
       
  3350 
       
  3351 	// check which region the fault occurred in...
       
  3352 	TInt asid = 0; // asid != 0 => code paging fault
       
  3353 	if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
       
  3354 		{
       
  3355 		// in ROM
       
  3356 		}
       
  3357 	else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
       
  3358 		{
       
  3359 		// in code
       
  3360 		asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;
       
  3361 		}
       
  3362 	else if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
       
  3363 		{
       
  3364 		// in aliased memory
       
  3365 		faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
       
  3366 		if(TUint(faultAddress-iCodeLinearBase)>=iCodeSize)
       
  3367 			return KErrUnknown; // not in alias of code
       
  3368 		asid = thread->iAliasOsAsid;
       
  3369 		__NK_ASSERT_DEBUG(asid != 0);
       
  3370 		}
       
  3371 	else
       
  3372 		return KErrUnknown; // Not in pageable region
       
  3373 
       
  3374 	// Check if thread holds fast mutex and claim system lock
       
  3375 	NFastMutex* fm = NKern::HeldFastMutex();
       
  3376 	TPagingExcTrap* trap = thread->iPagingExcTrap;
       
  3377 	if(!fm)
       
  3378 		NKern::LockSystem();
       
  3379 	else
       
  3380 		{
       
  3381 		if(!trap || fm!=&TheScheduler.iLock)
       
  3382 			{
       
  3383 			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
       
  3384 			Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
       
  3385 			}
       
  3386 		// restore address space on multiple memory model (because the trap will
       
  3387 		// bypass any code which would have done this.)...
       
  3388 		DMemModelThread::RestoreAddressSpace();
       
  3389 
       
  3390 		// Current thread already has the system lock...
       
  3391 		NKern::FlashSystem(); // Let someone else have a go with the system lock.
       
  3392 		}
       
  3393 
       
  3394 	// System locked here
       
  3395 
       
  3396 	TInt r = KErrNone;	
       
  3397 	if(thread->IsRealtime())
       
  3398 		r = CheckRealtimeThreadFault(thread, aExceptionInfo);
       
  3399 	if (r == KErrNone)
       
  3400 		r = HandleFault(exc, faultAddress, asid);
       
  3401 	
       
  3402 	// Restore system lock state
       
  3403 	if (fm != NKern::HeldFastMutex())
       
  3404 		{
       
  3405 		if (fm)
       
  3406 			NKern::LockSystem();
       
  3407 		else
       
  3408 			NKern::UnlockSystem();
       
  3409 		}
       
  3410 	
       
  3411 	// Deal with XTRAP_PAGING
       
  3412 	if(r == KErrNone && trap)
       
  3413 		{
       
  3414 		trap->Exception(1); // Return from exception trap with result '1' (value>0)
       
  3415 		// code doesn't continue beyond this point.
       
  3416 		}
       
  3417 
       
  3418 	return r;
       
  3419 	}
       
  3420 
       
  3421 
       
  3422 
       
  3423 TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid)
       
  3424 	{
       
  3425 	++iEventInfo.iPageFaultCount;
       
  3426 
       
  3427 	// get page table entry...
       
  3428 	TPte* pt = SafePtePtrFromLinAddr(aFaultAddress, aAsid);
       
  3429 	if(!pt)
       
  3430 		return KErrNotFound;
       
  3431 	TPte pte = *pt;
       
  3432 
       
  3433 	// Do what is required to make page accessible...
       
  3434 
       
  3435 	if(pte&KPtePresentMask)
       
  3436 		{
       
  3437 		// PTE is present, so assume it has already been dealt with
       
  3438 #ifdef BTRACE_PAGING
       
  3439 		BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
       
  3440 #endif
       
  3441 		return KErrNone;
       
  3442 		}
       
  3443 
       
  3444 	if(pte!=KPteNotPresentEntry)
       
  3445 		{
       
  3446 		// PTE already has a page
       
  3447 		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
       
  3448 		if(pageInfo->State()==SPageInfo::EStatePagedDead)
       
  3449 			{
       
  3450 			// page is currently being unmapped, so finish unmapping it here...
       
  3451 			MakePTEInaccessible(pt, KPteNotPresentEntry, aFaultAddress, aAsid);
       
  3452 			}
       
  3453 		else
       
  3454 			{
       
  3455 			// page just needs making young again...
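       
       			// (an 'old' page keeps its PTE contents but has the small-page type bits
       
       			// cleared so that any access faults; OR-ing KArmV6PteSmallPage back in makes
       
       			// the mapping valid again and Rejuvenate() makes the page young once more)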
       
  3456 			*pt = TPte(pte|KArmV6PteSmallPage); // Update page table
       
  3457 			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  3458 			Rejuvenate(pageInfo);
       
  3459 #ifdef BTRACE_PAGING
       
  3460 			BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
       
  3461 #endif
       
  3462 			return KErrNone;
       
  3463 			}
       
  3464 		}
       
  3465 
       
  3466 	// PTE not present, so page it in...
       
  3467 	// check if the fault is in a CodeSeg...
       
  3468 	DMemModelCodeSegMemory* codeSegMemory = NULL;
       
  3469 	if (!aAsid)
       
  3470 		NKern::ThreadEnterCS();
       
  3471 	else
       
  3472 		{
       
  3473 		// find CodeSeg...
       
  3474 		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
       
  3475 		if (!codeSeg)
       
  3476 			return KErrNotFound;
       
  3477 		codeSegMemory = codeSeg->Memory();
       
  3478 		if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged || codeSegMemory->iOsAsids->NotFree(aAsid, 1))
       
  3479 			return KErrNotFound;
       
  3480 	
       
  3481 		// check if it's paged in but not yet mapped into this process...			
       
  3482 		TInt pageNumber = (aFaultAddress - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
       
  3483 		TPhysAddr page = codeSegMemory->iPages[pageNumber];
       
  3484 		if (page != KPhysAddrInvalid)
       
  3485 			{
       
  3486 			// map it into this process...
       
  3487 			SPageInfo* pageInfo = SPageInfo::FromPhysAddr(page);
       
  3488 			__NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
       
  3489 			*pt = page | (codeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
       
  3490 			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  3491 			Rejuvenate(pageInfo);
       
  3492 #ifdef BTRACE_PAGING
       
  3493 			BTraceContext8(BTrace::EPaging,BTrace::EPagingMapCode,page,aFaultAddress);
       
  3494 #endif
       
  3495 			return KErrNone;
       
  3496 			}
       
  3497 
       
  3498 		// open reference on CodeSegMemory
       
  3499 		NKern::ThreadEnterCS();
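       
       		// (the return code from Open() is only captured in debug builds, purely for
       
       		// the assert below)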
       
  3500 #ifdef _DEBUG
       
  3501 		TInt r = 
       
  3502 #endif
       
  3503 				 codeSegMemory->Open();
       
  3504 		__NK_ASSERT_DEBUG(r==KErrNone);
       
  3505 		NKern::FlashSystem();
       
  3506 		}		
       
  3507 
       
  3508 #ifdef BTRACE_PAGING
       
  3509 	BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
       
  3510 #endif
       
  3511 	TInt r = PageIn(aFaultAddress, aAsid, codeSegMemory);
       
  3512 
       
  3513 	NKern::UnlockSystem();
       
  3514 
       
  3515 	if(codeSegMemory)
       
  3516 		codeSegMemory->Close();
       
  3517 
       
  3518 	NKern::ThreadLeaveCS();
       
  3519 	
       
  3520 	return r;
       
  3521 	}
       
  3522 
       
  3523 
       
  3524 TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory)
       
  3525 	{
       
  3526 	// Get a request object - this may block until one is available
       
  3527 	DPagingRequest* req = AcquireRequestObject();
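       
       	// (other threads may have run while we waited for the request object, so the PTE
       
       	// is re-checked before doing any work)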
       
  3528 	
       
  3529 	// Get page table entry
       
  3530 	TPte* pt = SafePtePtrFromLinAddr(aAddress, aAsid);
       
  3531 
       
  3532 	// Check page is still required...
       
  3533 	if(!pt || *pt!=KPteNotPresentEntry)
       
  3534 		{
       
  3535 #ifdef BTRACE_PAGING
       
  3536 		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
       
  3537 #endif
       
  3538 		ReleaseRequestObject(req);
       
  3539 		return pt ? KErrNone : KErrNotFound;
       
  3540 		}
       
  3541 
       
  3542 	++iEventInfo.iPageInReadCount;
       
  3543 
       
  3544 	// Get a free page
       
  3545 	SPageInfo* pageInfo = AllocateNewPage();
       
  3546 	__NK_ASSERT_DEBUG(pageInfo);
       
  3547 
       
  3548 	// Get physical address of free page
       
  3549 	TPhysAddr phys = pageInfo->PhysAddr();
       
  3550 	__NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
       
  3551 
       
  3552 	// Temporarily map free page
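       
       	// The temporary load mapping must use the same page colour (the VA bits selected by
       
       	// KPageColourMask) as the final address, so that both mappings index the same sets
       
       	// in a virtually-indexed cache.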
       
  3553 	TInt colour = (aAddress>>KPageShift)&KPageColourMask;
       
  3554 	__NK_ASSERT_DEBUG((req->iLoadAddr & (KPageColourMask << KPageShift)) == 0);
       
  3555 	req->iLoadAddr |= colour << KPageShift;
       
  3556 	TLinAddr loadAddr = req->iLoadAddr;
       
  3557 	pt = req->iLoadPte+colour;
       
  3558 //	*pt = phys | SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTWAWTWA, 0, 1);
       
  3559 	*pt = phys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
       
  3560 	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  3561 
       
  3562 	// Read page from backing store
       
  3563 	aAddress &= ~KPageMask;	
       
  3564 	NKern::UnlockSystem();
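       
       	// (the system lock is dropped for the duration of the media read; paging state may
       
       	// change underneath us and is re-validated once the lock is re-taken below)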
       
  3565 
       
  3566 	TInt r;
       
  3567 	if (!aCodeSegMemory)
       
  3568 		r = ReadRomPage(req, aAddress);
       
  3569 	else
       
  3570 		{
       
  3571 		r = ReadCodePage(req, aCodeSegMemory, aAddress);
       
  3572 		if (r == KErrNone)
       
  3573 			aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
       
  3574 		}
       
  3575 	if(r!=KErrNone)
       
  3576 		Panic(EPageInFailed);
       
  3577 	
       
  3578 	// make caches consistent...
       
  3579 //	Cache::IMB_Range(loadAddr, KPageSize);
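       
       	// remap the page with its final cacheable attributes, drop the stale TLB entry for
       
       	// the uncached load mapping, and synchronise the instruction cache in case the new
       
       	// contents are code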
       
  3580 	*pt = phys | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
       
  3581 	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  3582 	InvalidateTLBForPage(loadAddr,KERNEL_MAPPING);
       
  3583 	CacheMaintenance::CodeChanged(loadAddr, KPageSize, CacheMaintenance::ECPUUncached);
       
  3584 
       
  3585 	NKern::LockSystem();
       
  3586 
       
  3587 	// Invalidate temporary mapping
       
  3588 	MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, loadAddr);
       
  3589 
       
  3590 	// Release request object now we're finished with it
       
  3591 	req->iLoadAddr &= ~(KPageColourMask << KPageShift);
       
  3592 	ReleaseRequestObject(req);
       
  3593 	
       
  3594 	// Get page table entry
       
  3595 	pt = SafePtePtrFromLinAddr(aAddress, aAsid);
       
  3596 
       
  3597 	// Check page still needs updating
       
  3598 	TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
       
  3599 	if(aCodeSegMemory)
       
  3600 		notNeeded |= aCodeSegMemory->iOsAsids->NotFree(aAsid, 1);
       
  3601 	if(notNeeded)
       
  3602 		{
       
  3603 		// We don't need the new page after all, so put it on the active list as a free page
       
  3604 		__KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
       
  3605 #ifdef BTRACE_PAGING
       
  3606 		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
       
  3607 #endif
       
  3608 		AddAsFreePage(pageInfo);
       
  3609 		return pt ? KErrNone : KErrNotFound;
       
  3610 		}
       
  3611 
       
  3612 	// Update page info
       
  3613 	if (!aCodeSegMemory)
       
  3614 		pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
       
  3615 	else
       
  3616 		{
       
  3617 		// Check if page has been paged in and mapped into another process while we were waiting
       
  3618 		TInt pageNumber = (aAddress - aCodeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
       
  3619 		TPhysAddr page = aCodeSegMemory->iPages[pageNumber];
       
  3620 		if (page != KPhysAddrInvalid)
       
  3621 			{
       
  3622 			// don't need page we've just paged in...
       
  3623 			AddAsFreePage(pageInfo);
       
  3624 
       
  3625 			// map existing page into this process...
       
  3626 			pageInfo = SPageInfo::FromPhysAddr(page);
       
  3627 			__NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
       
  3628 			*pt = page | (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
       
  3629 			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  3630 #ifdef BTRACE_PAGING
       
  3631 			BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
       
  3632 #endif
       
  3633 			Rejuvenate(pageInfo);
       
  3634 			return KErrNone;
       
  3635 			}
       
  3636 		aCodeSegMemory->iPages[pageNumber] = phys;
       
  3637 		
       
  3638 		pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
       
  3639 		}
       
  3640 
       
  3641 	// Map page into final location	
       
  3642 	*pt = phys | (aCodeSegMemory ? (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte) : KRomPtePerm);
       
  3643 	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
       
  3644 #ifdef BTRACE_PAGING
       
  3645 	TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
       
  3646 	BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
       
  3647 #endif
       
  3648 
       
  3649 	AddAsYoungest(pageInfo);
       
  3650 	BalanceAges();
       
  3651 
       
  3652 	return KErrNone;
       
  3653 	}
       
  3654 
       
  3655 
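       
       // Read a byte through a volatile pointer so the access cannot be optimised away; the
       
       // read is performed purely for its side effect of forcing a paging fault if the page
       
       // is not present.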
       
  3656 inline TUint8 ReadByte(TLinAddr aAddress)
       
  3657 	{ return *(volatile TUint8*)aAddress; }
       
  3658 
       
  3659 
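       
       // Make sure the page containing aPage is paged in, either in the current address space
       
       // (aProcess==NULL) or in aProcess via a temporary alias, by reading a byte from it under
       
       // a paging trap. Returns KErrNone on success, or KErrBadDescriptor if the address could
       
       // not be made accessible.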
       
  3660 TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
       
  3661 	{
       
  3662 	TInt r = KErrBadDescriptor;
       
  3663 	XTRAPD(exc,XT_DEFAULT,
       
  3664 		if (!aProcess)
       
  3665 			{
       
  3666 			XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage););
       
  3667 			r = KErrNone;
       
  3668 			}
       
  3669 		else
       
  3670 			{
       
  3671 			DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
       
  3672 		retry:
       
  3673 			TInt pagingFault;
       
  3674 			XTRAP_PAGING_START(pagingFault);
       
  3675 			CHECK_PAGING_SAFE;
       
  3676 			// make alias of page in this process
       
  3677 			TLinAddr alias_src;
       
  3678 			TInt alias_size;
       
  3679 			TInt aliasResult = t.Alias(aPage, (DMemModelProcess*)aProcess, 1, EMapAttrReadUser, alias_src, alias_size);
       
  3680 			if (aliasResult>=0)
       
  3681 				{
       
  3682 				// ensure page to be locked is mapped in, by reading from it...
       
  3683 				ReadByte(alias_src);
       
  3684 				r = KErrNone;
       
  3685 				}
       
  3686 			XTRAP_PAGING_END;
       
  3687 			t.RemoveAlias();
       
  3688 			if(pagingFault>0)
       
  3689 				goto retry;
       
  3690 			}
       
  3691 		); // end of XTRAPD
       
  3692 	if(exc)
       
  3693 		return KErrBadDescriptor;
       
  3694 	return r;
       
  3695 	}
       
  3696 
       
  3697 
       
  3698 TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
       
  3699 	{
       
  3700 	TInt asid = 0;
       
  3701 	if (aProcess)
       
  3702 		asid = ((DMemModelProcess*)aProcess)->iOsAsid;
       
  3703 	return Mmu().LinearToPhysical(aPage, asid);
       
  3704 	}
       
  3705 
       
  3706 
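       
       // Return a combination of EPageState* flags describing how aAddr is mapped in the
       
       // current process: which pageable region it lies in, whether a page table and PTE
       
       // exist for it, and the type and state of the page it maps.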
       
  3707 TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
       
  3708 	{
       
  3709 	DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
       
  3710 	TInt asid = 0;
       
  3711 	TPte* ptePtr = 0;
       
  3712 	TPte pte = 0;
       
  3713 	TInt r = 0;
       
  3714 	SPageInfo* pageInfo = NULL;
       
  3715 
       
  3716 	NKern::LockSystem();
       
  3717 
       
  3718 	DMemModelCodeSegMemory* codeSegMemory = 0;
       
  3719 	if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
       
  3720 		r |= EPageStateInRom;
       
  3721 	else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
       
  3722 		{
       
  3723 		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
       
  3724 		if(codeSeg)
       
  3725 			codeSegMemory = codeSeg->Memory();
       
  3726 		asid = process->iOsAsid;
       
  3727 		if (codeSegMemory && codeSegMemory->iOsAsids->NotAllocated(asid, 1))
       
  3728 			{
       
  3729 			r |= EPageStateInRamCode;
       
  3730 			if (codeSegMemory->iIsDemandPaged)
       
  3731 				r |= EPageStatePaged;
       
  3732 			}
       
  3733 		if(process->iCodeChunk)
       
  3734 			r |= EPageStateCodeChunkPresent;
       
  3735 		}
       
  3736 
       
  3737 	ptePtr = SafePtePtrFromLinAddr(aAddr,asid);
       
  3738 	if (!ptePtr)
       
  3739 		goto done;
       
  3740 	r |= EPageStatePageTablePresent;
       
  3741 	pte = *ptePtr;
       
  3742 	if (pte == KPteNotPresentEntry)
       
  3743 		goto done;		
       
  3744 	r |= EPageStatePtePresent;
       
  3745 	if (pte & KPtePresentMask)
       
  3746 		r |= EPageStatePteValid;
       
  3747 	
       
  3748 	pageInfo = SPageInfo::FromPhysAddr(pte);
       
  3749 	r |= pageInfo->Type();
       
  3750 	r |= pageInfo->State()<<8;
       
  3751 
       
  3752 	if (codeSegMemory && codeSegMemory->iPages)
       
  3753 		{
       
  3754 		TPhysAddr phys = pte & ~KPageMask;
       
  3755 		TInt pageNumber = (aAddr - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
       
  3756 		if (codeSegMemory->iPages[pageNumber] == phys)
       
  3757 			r |= EPageStatePhysAddrPresent;
       
  3758 		}
       
  3759 
       
  3760 done:
       
  3761 	NKern::UnlockSystem();
       
  3762 	return r;
       
  3763 	}
       
  3764 
       
  3765 
       
  3766 TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
       
  3767 	{
       
  3768 	// Don't check mutex order for reads from global area, except for the paged part of rom
       
  3769 	TBool rangeInGlobalArea = aStartAddr >= KRomLinearBase;
       
  3770 	TBool rangeInPagedRom = iRomPagedLinearBase != 0 && aStartAddr < (iRomLinearBase + iRomSize) && (aStartAddr + aLength) > iRomPagedLinearBase;
       
  3771 	return !rangeInGlobalArea || rangeInPagedRom;
       
  3772 	}
       
  3773 
       
  3774 
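       
       // If the range [aStart,aStart+aSize) overlaps pageable ROM or pageable user code, hand
       
       // the request to the pager's ReserveLock(); otherwise (or if demand paging is not
       
       // enabled) no locking is needed and EFalse is returned.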
       
  3775 EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
       
  3776 	{
       
  3777 	MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
       
  3778 	if(pager)
       
  3779 		{
       
  3780 		ArmMmu& m = pager->Mmu();
       
  3781 		TLinAddr end = aStart+aSize;
       
  3782 		
       
  3783 		if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
       
  3784 			(aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
       
  3785 			return pager->ReserveLock(aThread,aStart,aSize,*this);
       
  3786 		}
       
  3787 	return EFalse;
       
  3788 	}
       
  3789 
       
  3790 void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
       
  3791 //
       
  3792 // Mark the page at aOffset in aChunk read-only to prevent it being
       
  3793 // modified while defrag is in progress. Save the required information
       
  3794 // to allow the fault handler to deal with this.
       
  3795 // Call this with the system unlocked.
       
  3796 //
       
  3797 	{
       
  3798 	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));
       
  3799 
       
  3800 	TInt ptid = aChunk->iPageTables[aOffset>>KChunkShift];
       
  3801 	if(ptid == 0xffff)
       
  3802 		Panic(EDefragDisablePageFailed);	
       
  3803 
       
  3804 	NKern::LockSystem();
       
  3805 	TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
       
  3806 	TPte pte = *pPte;
       
  3807 	if ((pte & KArmV6PteSmallPage) != KArmV6PteSmallPage 
       
  3808 			|| SP_PTE_PERM_GET(pte) != (TUint)KArmV6PermRWRW)
       
  3809 		Panic(EDefragDisablePageFailed);
       
  3810 
       
  3811 	iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
       
  3812 	if (aChunk->iOwningProcess)
       
  3813 		iDisabledAddrAsid = ((DMemModelProcess*)(aChunk->iOwningProcess))->iOsAsid;
       
  3814 	else
       
  3815 		iDisabledAddrAsid = iDisabledAddr<KRomLinearBase ? UNKNOWN_MAPPING : KERNEL_MAPPING;
       
  3816 	iDisabledPte = pPte;
       
  3817 	iDisabledOldVal = pte;
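       
       	// RamDefragFault() below uses this saved state to restore write access if the page
       
       	// is written to while the defrag copy is in progress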
       
  3818 
       
  3819 	*pPte = SP_PTE_PERM_SET(pte, KArmV6PermRORO);
       
  3820 	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
       
  3821 	InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
       
  3822 	NKern::UnlockSystem();
       
  3823 	}
       
  3824 
       
  3825 TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
       
  3826 	{
       
  3827 	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
       
  3828 
       
  3829 	// Get faulting address
       
  3830 	TLinAddr faultAddress;
       
  3831 	if(exc.iExcCode==EArmExceptionDataAbort)
       
  3832 		{
       
  3833 		faultAddress = exc.iFaultAddress;
       
  3834 		// Defrag can only cause writes to fault on the multiple memory model
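       
       		// (bit 11 of the fault status register distinguishes reads from writes on
       
       		// ARMv6; it is set for a write access)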
       
  3835 		if(!(exc.iFaultStatus&(1<<11)))
       
  3836 			return KErrUnknown;
       
  3837 		}
       
  3838 	else
       
  3839 		return KErrUnknown; // Not data abort
       
  3840 
       
  3841 	// Only handle page permission faults
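       
       	// (0xf is the ARMv6 'page permission fault' status code, using the same 0x40f mask
       
       	// as the demand paging handler above)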
       
  3842 	if((exc.iFaultStatus & 0x40f) != 0xf)
       
  3843 		return KErrUnknown;
       
  3844 
       
  3845 	DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
       
  3846 	TInt asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;
       
  3847 
       
  3848 	TBool aliased = EFalse;
       
  3849 	if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
       
  3850 		{
       
  3851 		// in aliased memory
       
  3852 		aliased = ETrue;
       
  3853 		faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
       
  3854 		asid = thread->iAliasOsAsid;
       
  3855 		__NK_ASSERT_DEBUG(asid != 0);
       
  3856 		}
       
  3857 
       
  3858 	// Take system lock if not already held
       
  3859 	NFastMutex* fm = NKern::HeldFastMutex();
       
  3860 	if(!fm)
       
  3861 		NKern::LockSystem();
       
  3862 	else if(fm!=&TheScheduler.iLock)
       
  3863 		{
       
  3864 		__KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
       
  3865 		Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes
       
  3866 		}
       
  3867 
       
  3868 	TInt r = KErrUnknown;
       
  3869 
       
  3870 	// check if write access to the page has already been restored and retry if so
       
  3871 	TPte* pt = SafePtePtrFromLinAddr(faultAddress, asid);
       
  3872 	if(!pt)
       
  3873 		{
       
  3874 		r = KErrNotFound;
       
  3875 		goto leave;
       
  3876 		}
       
  3877 	if (SP_PTE_PERM_GET(*pt) == (TUint)KArmV6PermRWRW)
       
  3878 		{
       
  3879 		r = KErrNone;
       
  3880 		goto leave;
       
  3881 		}
       
  3882 
       
  3883 	// check if the fault occurred in the page we are moving
       
  3884 	if (	   iDisabledPte
       
  3885 			&& TUint(faultAddress - iDisabledAddr) < TUint(KPageSize)
       
  3886 			&& (iDisabledAddrAsid < 0 || asid == iDisabledAddrAsid) )
       
  3887 		{
       
  3888 		// restore access to the page
       
  3889 		*iDisabledPte = iDisabledOldVal;
       
  3890 		CacheMaintenance::SinglePteUpdated((TLinAddr)iDisabledPte);
       
  3891 		InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
       
  3892 		if (aliased)
       
  3893 			InvalidateTLBForPage(exc.iFaultAddress, ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid);
       
  3894 		iDisabledAddr = 0;
       
  3895 		iDisabledAddrAsid = -1;
       
  3896 		iDisabledPte = NULL;
       
  3897 		iDisabledOldVal = 0;
       
  3898 		r = KErrNone;
       
  3899 		}
       
  3900 
       
  3901 leave:
       
  3902 	// Restore system lock state
       
  3903 	if (!fm)
       
  3904 		NKern::UnlockSystem();
       
  3905 	
       
  3906 	return r;
       
  3907 	}