kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp

// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"
#include "mmapping.h"
#include "mobject.h"
#include "maddressspace.h"
#include "mptalloc.h"
#include "mmanager.h" // needed for DMemoryManager::Pin/Unpin, not nice, but no obvious way to break dependency
#include "cache_maintenance.inl"

//
// DMemoryMapping
//

DMemoryMapping::DMemoryMapping(TUint aType)
	: DMemoryMappingBase(aType)
	{
	}


TInt DMemoryMapping::Construct(TMemoryAttributes aAttributes, TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TRACE(("DMemoryMapping[0x%08x]::Construct(0x%x,0x%x,%d,0x%08x,0x%08x,0x%08x)",this,(TUint32&)aAttributes,aFlags,aOsAsid,aAddr,aSize,aColourOffset));

	// setup PDE values...
	iBlankPde = Mmu::BlankPde(aAttributes);

	// setup flags...
	if(aFlags&EMappingCreateReserveAllResources)
		Flags() |= EPermanentPageTables;

	// allocate virtual memory...
	TInt r = AllocateVirtualMemory(aFlags,aOsAsid,aAddr,aSize,aColourOffset);
	if(r==KErrNone)
		{
		// add to address space...
		TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
		TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
		r = AddressSpace[osAsid]->AddMapping(addr,this);
		if(r!=KErrNone)
			FreeVirtualMemory();
		}

	return r;
	}
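
// Note on the packed address/ASID fields used above: mapping base addresses
// are page aligned, so the low KPageShift bits of the address are free and
// iAllocatedLinAddrAndOsAsid keeps the OS ASID there. An illustrative sketch
// of the encoding (the values are hypothetical, not taken from this file):
//
//	TLinAddr addr = 0x40000000;      // page-aligned base address
//	TInt osAsid = 5;                 // address space id, always < KPageSize
//	TUint32 packed = addr|osAsid;    // 0x40000005
//	TLinAddr a = packed&~KPageMask;  // recovers 0x40000000
//	TInt asid = packed&KPageMask;    // recovers 5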
       

DMemoryMapping::~DMemoryMapping()
	{
	TRACE(("DMemoryMapping[0x%08x]::~DMemoryMapping()",this));
	__NK_ASSERT_DEBUG(!IsAttached());

	// remove from address space...
	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TAny* removed = AddressSpace[osAsid]->RemoveMapping(addr);
	if(removed)
		__NK_ASSERT_DEBUG(removed==this);

	FreeVirtualMemory();
	}


void DMemoryMapping::BTraceCreate()
	{
	MmuLock::Lock();
	TUint32 data[4] = { iStartIndex, iSizeInPages, OsAsid(), Base() };
	BTraceContextN(BTrace::EFlexibleMemModel,BTrace::EMemoryMappingCreate,this,Memory(),data,sizeof(data));
	MmuLock::Unlock();
	}


TInt DMemoryMapping::Map(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	TRACE(("DMemoryMapping[0x%08x]::Map(0x%08x,0x%x,0x%x,0x%08x)",this,aMemory,aIndex,aCount,aPermissions));
	__NK_ASSERT_DEBUG(!IsAttached());

	// check reserved resources are compatible (memory objects with reserved resources
	// don't expect to have to allocate memory when mapping new pages)...
	if(aMemory->iFlags&DMemoryObject::EReserveResources && !(Flags()&EPermanentPageTables))
		return KErrArgument;

	// check arguments for coarse mappings...
	if(IsCoarse())
		{
		if(!aMemory->IsCoarse())
			return KErrArgument;
		if((aCount|aIndex)&(KChunkMask>>KPageShift))
			return KErrArgument;
		}

	TLinAddr base = iAllocatedLinAddrAndOsAsid&~KPageMask;
#ifdef _DEBUG
	TUint osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
#endif

	// check user/supervisor memory partitioning...
	if(base<KUserMemoryLimit != (bool)(aPermissions&EUser))
		return KErrAccessDenied;

	// check mapping doesn't straddle KGlobalMemoryBase or KUserMemoryLimit...
	__NK_ASSERT_DEBUG(TUint(KGlobalMemoryBase-base)==0 || TUint(KGlobalMemoryBase-base)>=TUint(aCount<<KPageShift));
	__NK_ASSERT_DEBUG(TUint(KUserMemoryLimit-base)==0 || TUint(KUserMemoryLimit-base)>=TUint(aCount<<KPageShift));

	// setup attributes...
	TBool global = base>=KGlobalMemoryBase;
	__NK_ASSERT_DEBUG(global || osAsid!=(TInt)KKernelOsAsid); // prevent non-global memory in kernel process
	PteType() = Mmu::PteType(aPermissions,global);
	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),PteType());

	// setup base address...
	TUint colourOffset = ((aIndex&KPageColourMask)<<KPageShift);
	if(colourOffset+aCount*KPageSize > iAllocatedSize)
		return KErrTooBig;
	__NK_ASSERT_DEBUG(!iLinAddrAndOsAsid || ((iLinAddrAndOsAsid^iAllocatedLinAddrAndOsAsid)&~(KPageColourMask<<KPageShift))==0); // new, OR, only differ in page colour
	iLinAddrAndOsAsid = iAllocatedLinAddrAndOsAsid+colourOffset;

	// attach to memory object...
	TInt r = Attach(aMemory,aIndex,aCount);

	// cleanup if error...
	if(r!=KErrNone)
		iLinAddrAndOsAsid = 0;

	return r;
	}
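
// A note on the page-colour arithmetic in Map() above: on a VIPT cache a
// page must appear at a virtual address whose "colour" (low address bits
// just above the page size) matches its index in the memory object. A
// minimal sketch with hypothetical values, assuming 4 colours and 4KB pages:
//
//	TUint aIndex = 7; // page index into the memory object
//	TUint colourOffset = (aIndex&KPageColourMask)<<KPageShift;
//	// = (7&3)<<12 = 0x3000, so the base address is advanced by 0x3000 and
//	// virtual address bits [13:12] agree with the page index.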
       

void DMemoryMapping::Unmap()
	{
	Detach();
	// we can't clear iLinAddrAndOsAsid here because this may be needed by other code,
	// e.g. DFineMapping::MapPages/UnmapPages/RestrictPages/PageIn
	}


TInt DMemoryMapping::AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TRACE(("DMemoryMapping[0x%08x]::AllocateVirtualMemory(0x%x,%d,0x%08x,0x%08x,0x%08x)",this,aFlags,aOsAsid,aAddr,aSize,aColourOffset));
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
	__NK_ASSERT_DEBUG(!iAllocatedLinAddrAndOsAsid);
	__NK_ASSERT_DEBUG(!iAllocatedSize);

	// setup PDE type...
	TUint pdeType = 0;
	if(aFlags&EMappingCreateCommonVirtual)
		pdeType |= EVirtualSlabTypeCommonVirtual;
	if(aFlags&EMappingCreateDemandPaged)
		pdeType |= EVirtualSlabTypeDemandPaged;

	TInt r;
	TUint colourOffset = aColourOffset&(KPageColourMask<<KPageShift);
	TLinAddr addr;
	TUint size;
	if(aFlags&(EMappingCreateFixedVirtual|EMappingCreateAdoptVirtual))
		{
		// just use the supplied virtual address...
		__NK_ASSERT_ALWAYS(aAddr);
		__NK_ASSERT_ALWAYS(colourOffset==0);
		__NK_ASSERT_DEBUG((aFlags&EMappingCreateAdoptVirtual)==0 || AddressSpace[aOsAsid]->CheckPdeType(aAddr,aSize,pdeType));
		addr = aAddr;
		size = aSize;
		r = KErrNone;
		}
	else
		{
		if(aFlags&(EMappingCreateExactVirtual|EMappingCreateCommonVirtual))
			{
			__NK_ASSERT_ALWAYS(aAddr); // address must be specified
			}
		else
			{
			__NK_ASSERT_ALWAYS(!aAddr); // address shouldn't have been specified
			}

		// adjust for colour...
		TUint allocSize = aSize+colourOffset;
		TUint allocAddr = aAddr;
		if(allocAddr)
			{
			allocAddr -= colourOffset;
			if(allocAddr&(KPageColourMask<<KPageShift))
				return KErrArgument; // wrong colour
			}

		// allocate virtual addresses...
		if(aFlags&EMappingCreateUserGlobalVirtual)
			{
			if(aOsAsid!=(TInt)KKernelOsAsid)
				return KErrArgument;
			r = DAddressSpace::AllocateUserGlobalVirtualMemory(addr,size,allocAddr,allocSize,pdeType);
			}
		else
			r = AddressSpace[aOsAsid]->AllocateVirtualMemory(addr,size,allocAddr,allocSize,pdeType);
		}

	if(r==KErrNone)
		{
		iAllocatedLinAddrAndOsAsid = addr|aOsAsid;
		iAllocatedSize = size;
		}

	TRACE(("DMemoryMapping[0x%08x]::AllocateVirtualMemory returns %d address=0x%08x",this,r,addr));
	return r;
	}
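
// A short worked example of the "adjust for colour" step above (illustrative
// numbers only, assuming KPageColourMask==3 and 4KB pages): a request for
// aSize=0x5000 at aColourOffset=0x2000 is turned into a larger allocation so
// that a colour-aligned region containing the required offset exists:
//
//	TUint colourOffset = 0x2000;           // from aColourOffset
//	TUint allocSize = 0x5000+colourOffset; // 0x7000 is allocated
//	// the returned region starts colour aligned; Map() later adds the page
//	// colour of the mapped index back in when setting iLinAddrAndOsAsid.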
       

void DMemoryMapping::FreeVirtualMemory()
	{
	if(!iAllocatedSize)
		return; // no virtual memory to free

	TRACE(("DMemoryMapping[0x%08x]::FreeVirtualMemory()",this));

	iLinAddrAndOsAsid = 0;

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	AddressSpace[osAsid]->FreeVirtualMemory(addr,iAllocatedSize);
	iAllocatedLinAddrAndOsAsid = 0;
	iAllocatedSize = 0;
	}



//
// DCoarseMapping
//

DCoarseMapping::DCoarseMapping()
	: DMemoryMapping(ECoarseMapping)
	{
	}


DCoarseMapping::DCoarseMapping(TUint aFlags)
	: DMemoryMapping(ECoarseMapping|aFlags)
	{
	}


DCoarseMapping::~DCoarseMapping()
	{
	}


TInt DCoarseMapping::DoMap()
	{
	TRACE(("DCoarseMapping[0x%08x]::DoMap()", this));
	__NK_ASSERT_DEBUG(((iStartIndex|iSizeInPages)&(KChunkMask>>KPageShift))==0); // be extra paranoid about alignment

	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we're called from code which has added mapping to memory

	TUint flash = 0;
	TUint chunk = iStartIndex >> KPagesInPDEShift;
	TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift;
	TBool sectionMappingsBroken = EFalse;

	while(chunk < endChunk)
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo*2);
		TPte* pt = memory->GetPageTable(PteType(), chunk);
		if(!pt)
			{
			TRACE2(("!PDE %x=%x (was %x)",pPde,KPdeUnallocatedEntry,*pPde));
			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
			}
		else
			{
			TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
			if (Mmu::PdeMapsSection(*pPde))
				{
				// break previous section mapping...
				__NK_ASSERT_DEBUG(*pPde==Mmu::PageToSectionEntry(pt[0],iBlankPde));
				sectionMappingsBroken = ETrue;
				}
			else
				__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry || ((*pPde^pde)&~KPdeMatchMask)==0);
			*pPde = pde;
			SinglePdeUpdated(pPde);
			flash += 3; // increase flash rate because we've done quite a bit more work
			}
		++pPde;
		++chunk;
		}
	MmuLock::Unlock();

	if (sectionMappingsBroken)
		{
		// We must invalidate the TLB since we broke section mappings created by the bootstrap.
		// Since this will only ever happen on boot, we just invalidate the entire TLB for this
		// process.
		InvalidateTLBForAsid(OsAsid());
		}

	return KErrNone;
	}
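
// For orientation: a coarse mapping shares whole page tables with its memory
// object, one per PDE-sized "chunk", so DoMap() walks PDEs rather than PTEs.
// An illustrative index calculation (hypothetical values, assuming 4KB pages
// and 1MB chunks, i.e. 256 pages per PDE):
//
//	TUint iStartIndex = 512, iSizeInPages = 768;
//	TUint chunk = iStartIndex >> KPagesInPDEShift;                     // 2
//	TUint endChunk = (iStartIndex + iSizeInPages) >> KPagesInPDEShift; // 5
//	// PDEs for chunks 2, 3 and 4 are pointed at the object's page tables.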
       

void DCoarseMapping::DoUnmap()
	{
	TRACE(("DCoarseMapping[0x%08x]::DoUnmap()", this));
	MmuLock::Lock();
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),Base());
	TPde* pPdeEnd = pPde+(iSizeInPages>>(KChunkShift-KPageShift));
	TUint flash = 0;
	do
		{
		MmuLock::Flash(flash,KMaxPdesInOneGo);
		TPde pde = KPdeUnallocatedEntry;
		TRACE2(("!PDE %x=%x",pPde,pde));
		*pPde = pde;
		SinglePdeUpdated(pPde);
		++pPde;
		}
	while(pPde<pPdeEnd);
	MmuLock::Unlock();

	InvalidateTLBForAsid(OsAsid());
	}


TInt DCoarseMapping::MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	return KErrNotSupported;
	}


void DCoarseMapping::UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}

void DCoarseMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}

void DCoarseMapping::RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	// shouldn't ever be called because coarse mappings don't have their own page tables...
	__NK_ASSERT_DEBUG(0);
	}


TInt DCoarseMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	MmuLock::Lock();

	if(!IsAttached())
		{
		MmuLock::Unlock();
		return KErrNotFound;
		}

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	return memory->PageIn(this, aPages, aPinArgs, aMapInstanceCount);
	}


TBool DCoarseMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	TBool success = memory->MovingPageIn(this, aPageArrayPtr, aIndex);
	if (success)
		{
		TLinAddr addr = Base() + (aIndex - iStartIndex) * KPageSize;
		InvalidateTLBForPage(addr);
		}
	return success;
	}


TPte* DCoarseMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	TRACE(("DCoarseMapping::FindPageTable(0x%x, %d)", aLinAddr, aMemoryIndex));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we've checked mapping IsAttached
	return memory->FindPageTable(this, aLinAddr, aMemoryIndex);
	}



//
// DFineMapping
//

DFineMapping::DFineMapping()
	: DMemoryMapping(0)
	{
	}


DFineMapping::~DFineMapping()
	{
	TRACE(("DFineMapping[0x%08x]::~DFineMapping()",this));
	FreePermanentPageTables();
	}

#ifdef _DEBUG
void DFineMapping::ValidatePageTable(TPte* aPt, TLinAddr aAddr)
	{
	if(aPt)
		{
		// check page table is correct...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPt);
		__NK_ASSERT_DEBUG(pti->CheckFine(aAddr&~KChunkMask,OsAsid()));
		DMemoryObject* memory = Memory();
		if(memory)
			{
			if(memory->IsDemandPaged() && !IsPinned() && !(Flags()&EPageTablesAllocated))
				__NK_ASSERT_DEBUG(pti->IsDemandPaged());
			else
				__NK_ASSERT_DEBUG(!pti->IsDemandPaged());
			}
		}
	}
#endif

TPte* DFineMapping::GetPageTable(TLinAddr aAddr)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get address of PDE which refers to the page table...
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),aAddr);

	// get page table...
	TPte* pt = Mmu::PageTableFromPde(*pPde);
#ifdef _DEBUG
	ValidatePageTable(pt, aAddr);
#endif
	return pt;
	}


TPte* DFineMapping::GetOrAllocatePageTable(TLinAddr aAddr)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	// get address of PDE which refers to the page table...
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),aAddr);

	// get page table...
	TPte* pt = Mmu::PageTableFromPde(*pPde);
	if(!pt)
		{
		pt = AllocatePageTable(aAddr,pPde);
#ifdef _DEBUG
		ValidatePageTable(pt, aAddr);
#endif
		}

	return pt;
	}


TPte* DFineMapping::GetOrAllocatePageTable(TLinAddr aAddr, TPinArgs& aPinArgs)
	{
	__NK_ASSERT_DEBUG(aPinArgs.iPinnedPageTables);

	if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable))
		return 0;

	TPte* pinnedPt = 0;
	for(;;)
		{
		TPte* pt = GetOrAllocatePageTable(aAddr);

		if(pinnedPt && pinnedPt!=pt)
			{
			// previously pinned page table not needed...
			PageTableAllocator::UnpinPageTable(pinnedPt,aPinArgs);

			// make sure we have memory for next pin attempt...
			MmuLock::Unlock();
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);
			MmuLock::Lock();
			if(!aPinArgs.HaveSufficientPages(KNumPagesToPinOnePageTable)) // if out of memory...
				{
				// make sure we free any unneeded page table we allocated...
				if(pt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),aAddr));
				return 0;
				}
			}

		if(!pt)
			return 0; // out of memory

		if(pt==pinnedPt)
			{
			// we got a page table and it was pinned...
			*aPinArgs.iPinnedPageTables++ = pt;
			++aPinArgs.iNumPinnedPageTables;
			return pt;
			}

		// don't pin page table if it's not paged (e.g. unpaged part of ROM)...
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(!pti->IsDemandPaged())
			return pt;

		// pin the page table...
		pinnedPt = pt;
		PageTableAllocator::PinPageTable(pinnedPt,aPinArgs);
		}
	}
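
// The loop above handles a subtle race: PinPageTable() may release MmuLock,
// so after pinning the code loops back and re-reads the PDE; only when the
// same page table is seen again (pt==pinnedPt) is the pin known to be good.
// A sketch of a caller's side, assuming aPinArgs was prepared with room for
// pinned page tables (illustrative only, not a real call site):
//
//	MmuLock::Lock();
//	TPte* pt = GetOrAllocatePageTable(addr,pinArgs);
//	MmuLock::Unlock();
//	if(!pt)
//		return KErrNoMemory; // allocation or replacement pages exhausted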
       

TInt DFineMapping::AllocateVirtualMemory(TMappingCreateFlags aFlags, TInt aOsAsid, TLinAddr aAddr, TUint aSize, TLinAddr aColourOffset)
	{
	TInt r = DMemoryMapping::AllocateVirtualMemory(aFlags,aOsAsid,aAddr,aSize,aColourOffset);
	if(r==KErrNone && (Flags()&EPermanentPageTables))
		{
		r = AllocatePermanentPageTables();
		if(r!=KErrNone)
			FreeVirtualMemory();
		}
	return r;
	}


void DFineMapping::FreeVirtualMemory()
	{
	FreePermanentPageTables();
	DMemoryMapping::FreeVirtualMemory();
	}


TPte* DFineMapping::AllocatePageTable(TLinAddr aAddr, TPde* aPdeAddress, TBool aPermanent)
	{
	TRACE2(("DFineMapping[0x%08x]::AllocatePageTable(0x%08x,0x%08x,%d)",this,aAddr,aPdeAddress,aPermanent));

	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	for(;;)
		{
		// mapping is going, so we don't need a page table any more...
		if(BeingDetached())
			return 0;

		// get paged state...
		TBool demandPaged = false;
		if(!aPermanent)
			{
			DMemoryObject* memory = Memory();
			__NK_ASSERT_DEBUG(memory); // can't be NULL because not BeingDetached()
			demandPaged = memory->IsDemandPaged();
			}

		// get page table...
		TPte* pt = Mmu::PageTableFromPde(*aPdeAddress);
		if(pt!=0)
			{
			// we have a page table...
			__NK_ASSERT_DEBUG(SPageTableInfo::FromPtPtr(pt)->CheckFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask));
			if(aPermanent)
				{
				__NK_ASSERT_DEBUG(BeingDetached()==false);
				__NK_ASSERT_ALWAYS(!demandPaged);
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
				pti->IncPermanenceCount();
				}
			return pt;
			}

		// allocate a new page table...
		MmuLock::Unlock();
		::PageTables.Lock();
		TPte* newPt = ::PageTables.Alloc(demandPaged);
		if(!newPt)
			{
			// out of memory...
			::PageTables.Unlock();
			MmuLock::Lock();
			return 0;
			}

		// check if new page table is still needed...
		MmuLock::Lock();
		pt = Mmu::PageTableFromPde(*aPdeAddress);
		if(pt)
			{
			// someone else has already allocated a page table,
			// so free the one we just allocated and try again...
			MmuLock::Unlock();
			::PageTables.Free(newPt);
			}
		else if(BeingDetached())
			{
			// mapping is going, so we don't need a page table any more...
			MmuLock::Unlock();
			::PageTables.Free(newPt);
			::PageTables.Unlock();
			MmuLock::Lock();
			return 0;
			}
		else
			{
			// setup new page table...
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(newPt);
			pti->SetFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask);

			TPde pde = Mmu::PageTablePhysAddr(newPt)|iBlankPde;
			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
			__NK_ASSERT_DEBUG(((*aPdeAddress^pde)&~KPdeMatchMask)==0 || *aPdeAddress==KPdeUnallocatedEntry);
			*aPdeAddress = pde;
			SinglePdeUpdated(aPdeAddress);

			MmuLock::Unlock();
			}

		// loop back and recheck...
		::PageTables.Unlock();
		MmuLock::Lock();
		}
	}
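
// Note the lock ordering in AllocatePageTable(): the PageTables allocator
// lock may not be acquired while MmuLock is held, so the code drops MmuLock,
// takes ::PageTables.Lock(), then re-takes MmuLock and re-checks the PDE in
// case another thread raced in. Schematically:
//
//	MmuLock::Unlock();       // 1. drop the fine-grained lock
//	::PageTables.Lock();     // 2. take the allocator lock
//	// ... allocate ...
//	MmuLock::Lock();         // 3. re-take MmuLock and re-validate the PDE
//	// ... then release in the reverse order once the PDE is settled.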
       

void DFineMapping::FreePageTable(TPde* aPdeAddress)
	{
	TRACE2(("DFineMapping[0x%08x]::FreePageTable(0x%08x)",this,aPdeAddress));

	// get page table lock...
	::PageTables.Lock();
	MmuLock::Lock();

	// find page table...
	TPte* pt = Mmu::PageTableFromPde(*aPdeAddress);
	if(pt)
		{
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
		if(pti->PageCount() || pti->PermanenceCount())
			{
			// page table still in use, so don't free it...
			pt = 0;
			}
		else
			{
			// page table not used, so unmap it...
			TPde pde = KPdeUnallocatedEntry;
			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
			*aPdeAddress = pde;
			SinglePdeUpdated(aPdeAddress);
			}
		}

	MmuLock::Unlock();
	if(pt)
		::PageTables.Free(pt);
	::PageTables.Unlock();
	}


void DFineMapping::RemapPage(TPhysAddr& aPageArray, TUint aIndex, TUint aMapInstanceCount, TBool aInvalidateTLB)
	{
	TRACE2(("DFineMapping[0x%08x]::RemapPage(0x%x,0x%x,%d,%d)",this,aPageArray,aIndex,aMapInstanceCount,aInvalidateTLB));

	__NK_ASSERT_DEBUG(aIndex >= iStartIndex);
	__NK_ASSERT_DEBUG(aIndex < iStartIndex + iSizeInPages);

	TLinAddr addr = Base() + ((aIndex - iStartIndex) << KPageShift);
	TUint pteIndex = (addr >> KPageShift) & (KChunkMask >> KPageShift);

	// get address of page table...
	MmuLock::Lock();
	TPte* pPte = GetPageTable(addr);

	// check the page is still mapped and mapping isn't being detached
	// or hasn't been reused for another purpose...
	if(!pPte || BeingDetached() || aMapInstanceCount != MapInstanceCount())
		{
		// can't map pages to this mapping any more so just exit.
		MmuLock::Unlock();
		return;
		}

	// remap the page...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArray, iBlankPte);
	MmuLock::Unlock();

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	if (aInvalidateTLB)
		{
		InvalidateTLBForPage(addr + OsAsid());
		}
#endif
	}


TInt DFineMapping::MapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::MapPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());
	__NK_ASSERT_DEBUG(aPages.Index()>=iStartIndex);
	__NK_ASSERT_DEBUG(aPages.IndexEnd()-iStartIndex<=iSizeInPages);

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// get address of page table...
		MmuLock::Lock();
		TPte* pPte = GetOrAllocatePageTable(addr);

		// check mapping isn't being unmapped, or been reused for another purpose...
		if(BeingDetached() || aMapInstanceCount!=MapInstanceCount())
			{
			// can't map pages to this mapping any more, so free any page table
			// we just got (if it's not used)...
			if(!pPte)
				MmuLock::Unlock();
			else
				{
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
				TBool keepPt = pti->PermanenceCount() || pti->PageCount();
				MmuLock::Unlock();
				if(!keepPt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));
				}
			// then end...
			return KErrNone;
			}

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			return KErrNoMemory;
			}

		// map some pages...
		pPte += pteIndex;
		TBool keepPt = Mmu::MapPages(pPte, n, pages, iBlankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

	return KErrNone;
	}
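
// The chunking arithmetic used by MapPages() (and by the sibling functions
// below) splits work at page-table boundaries. A worked example with
// illustrative values, assuming 4KB pages and 1MB chunks (256 PTEs per
// page table):
//
//	TLinAddr addr = 0x40123000;
//	TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift); // 0x23 = 35
//	TUint n = (KChunkSize>>KPageShift)-pteIndex;                  // 256-35 = 221
//	// at most 221 pages are processed before crossing into the next page
//	// table; n is further capped at KMaxPagesInOneGo to bound lock hold time.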
       

void DFineMapping::UnmapPages(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::UnmapPages(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		MmuLock::Lock();

		// check that mapping hasn't been reused for another purpose...
		if(aMapInstanceCount!=MapInstanceCount())
			{
			MmuLock::Unlock();
			break;
			}

		// get address of PTE for pages...
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(pPte)
			{
			// unmap some pages...
			pPte += pteIndex;
			TBool keepPt = Mmu::UnmapPages(pPte,n,pages);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(pPde);
			}
		else
			{
			// no page table found...
			MmuLock::Unlock();
			}

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


void DFineMapping::RestrictPagesNA(RPageArray::TIter aPages, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::RestrictPagesNA(?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		MmuLock::Lock();

		// check that mapping hasn't been reused for another purpose...
		if(aMapInstanceCount!=MapInstanceCount())
			{
			MmuLock::Unlock();
			break;
			}

		// get address of PTE for pages...
		TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(pPte)
			{
			// restrict some pages...
			pPte += pteIndex;
			Mmu::RestrictPagesNA(pPte,n,pages);
			}
		MmuLock::Unlock();

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


TInt DFineMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
	{
	TRACE2(("DFineMapping[0x%08x]::PageIn(?,?,%d) index=0x%x count=0x%x",this,aMapInstanceCount,aPages.Index(),aPages.Count()));

	__NK_ASSERT_DEBUG(aPages.Count());
	__NK_ASSERT_DEBUG(aPages.Index()>=iStartIndex);
	__NK_ASSERT_DEBUG(aPages.IndexEnd()-iStartIndex<=iSizeInPages);

	TInt r = KErrNone;

	TLinAddr addr = Base()+(aPages.Index()-iStartIndex)*KPageSize;
#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	TLinAddr startAddr = addr;
#endif
	TBool pinPageTable = aPinArgs.iPinnedPageTables!=0; // check if we need to pin the first page table
	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);
		if(pteIndex==0)
			pinPageTable = aPinArgs.iPinnedPageTables!=0; // started a new page table, check if we need to pin it

		// calculate max number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>KMaxPagesInOneGo)
			n = KMaxPagesInOneGo;

		// get some pages...
		TPhysAddr* pages;
		n = aPages.Pages(pages,n);
		if(!n)
			break;

		// make sure we have memory to pin the page table if required...
		if(pinPageTable)
			aPinArgs.AllocReplacementPages(KNumPagesToPinOnePageTable);

		// get address of page table...
		MmuLock::Lock();
		TPte* pPte;
		if(pinPageTable)
			pPte = GetOrAllocatePageTable(addr,aPinArgs);
		else
			pPte = GetOrAllocatePageTable(addr);

		// check mapping isn't being unmapped or hasn't been reused...
		if(BeingDetached() || aMapInstanceCount != MapInstanceCount())
			{
			// can't map pages to this mapping any more, so free any page table
			// we just got (if it's not used)...
			if(!pPte)
				MmuLock::Unlock();
			else
				{
				SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
				TBool keepPt = pti->PermanenceCount() || pti->PageCount();
				MmuLock::Unlock();
				if(!keepPt)
					FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));
				}
			// then end...
			r = KErrNotFound;
			break;
			}

		// check for OOM...
		if(!pPte)
			{
			MmuLock::Unlock();
			r = KErrNoMemory;
			break;
			}

		// map some pages...
		pPte += pteIndex;
		TPte blankPte = iBlankPte;
		if(aPinArgs.iReadOnly)
			blankPte = Mmu::MakePteInaccessible(blankPte,true);
		TBool keepPt = Mmu::PageInPages(pPte, n, pages, blankPte);
		MmuLock::Unlock();

		// free page table if no longer needed...
		if(!keepPt)
			FreePageTable(Mmu::PageDirectoryEntry(OsAsid(),addr));

		// move on...
		aPages.Skip(n);
		addr += n*KPageSize;
		pinPageTable = false;
		}

#ifndef COARSE_GRAINED_TLB_MAINTENANCE
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = startAddr+OsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	return r;
	}
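
// A note on the blankPte adjustment above: for a read-only page-in request
// the PTE template is restricted with Mmu::MakePteInaccessible(pte,true)
// before the pages are mapped, so that a later write to such a page faults
// and gives the paging code a chance to mark the page dirty. This reading is
// inferred from the flag's use with aPinArgs.iReadOnly here, schematically:
//
//	TPte blankPte = iBlankPte;                              // writable template
//	if(aPinArgs.iReadOnly)
//		blankPte = Mmu::MakePteInaccessible(blankPte,true); // restricted variant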
       

TBool DFineMapping::MovingPageIn(TPhysAddr& aPageArrayPtr, TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	__NK_ASSERT_DEBUG(!BeingDetached());

	TLinAddr addr = Base() + (aIndex - iStartIndex) * KPageSize;
	TUint pteIndex = (addr >> KPageShift) & (KChunkMask >> KPageShift);

	// get address of page table...
	TPte* pPte = GetPageTable(addr);

	// Check the page is still mapped.
	if (!pPte)
		return EFalse;

	// map some pages...
	pPte += pteIndex;
	Mmu::RemapPage(pPte, aPageArrayPtr, iBlankPte);
	InvalidateTLBForPage(addr);
	return ETrue;
	}


TInt DFineMapping::DoMap()
	{
	TRACE(("DFineMapping[0x%08x]::DoMap()", this));
	DMemoryObject* memory = Memory(true); // safe because we're called from code which has added mapping to memory
	if(memory->IsDemandPaged())
		{
		// do nothing, allow pages to be mapped on demand...
		return KErrNone;
		}

	RPageArray::TIter pageIter;
	memory->iPages.FindStart(iStartIndex,iSizeInPages,pageIter);

	// map pages...
	TInt r = KErrNone;
	for(;;)
		{
		// find some pages...
		RPageArray::TIter pageList;
		TUint n = pageIter.Find(pageList);
		if(!n)
			break; // done

		// map some pages...
		r = MapPages(pageList,MapInstanceCount());

		// done with pages...
		pageIter.FindRelease(n);

		if(r!=KErrNone)
			break;
		}

	memory->iPages.FindEnd(iStartIndex,iSizeInPages);
	return r;
	}


void DFineMapping::DoUnmap()
	{
	TRACE2(("DFineMapping[0x%08x]::DoUnmap()",this));

	TLinAddr startAddr = Base();
	TUint count = iSizeInPages;
	TLinAddr addr = startAddr;
	TPde* pPde = Mmu::PageDirectoryEntry(OsAsid(),addr);

	for(;;)
		{
		TUint pteIndex = (addr>>KPageShift)&(KChunkMask>>KPageShift);

		// calculate number of pages to do...
		TUint n = (KChunkSize>>KPageShift)-pteIndex; // pages left in page table
		if(n>count)
			n = count;

		// get page table...
		MmuLock::Lock();
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		if(!pPte)
			{
			// no page table found, so nothing to do...
			MmuLock::Unlock();
			}
		else
			{
			// unmap some pages...
			pPte += pteIndex;
			if(n>KMaxPagesInOneGo)
				n = KMaxPagesInOneGo;
			TBool keepPt = Mmu::UnmapPages(pPte, n);
			MmuLock::Unlock();

			// free page table if no longer needed...
			if(!keepPt)
				FreePageTable(pPde);
			}

		// move on...
		addr += n*KPageSize;
		count -= n;
		if(!count)
			break;
		if(!(addr&KChunkMask))
			++pPde;
		}

#ifdef COARSE_GRAINED_TLB_MAINTENANCE
	InvalidateTLBForAsid(OsAsid());
#else
	// clean TLB...
	TLinAddr endAddr = addr;
	addr = LinAddrAndOsAsid();
	do InvalidateTLBForPage(addr);
	while((addr+=KPageSize)<endAddr);
#endif
	}


TInt DFineMapping::AllocatePermanentPageTables()
	{
	TRACE2(("DFineMapping[0x%08x]::AllocatePermanentPageTables()",this));
	__NK_ASSERT_DEBUG((Flags()&EPageTablesAllocated)==0);
	__NK_ASSERT_DEBUG(iBlankPde);

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TPde* pStartPde = Mmu::PageDirectoryEntry(osAsid,addr);
	TPde* pEndPde = Mmu::PageDirectoryEntry(osAsid,addr+iAllocatedSize-1);
	TPde* pPde = pStartPde;

	while(pPde<=pEndPde)
		{
		MmuLock::Lock();
		TPte* pPte = AllocatePageTable(addr,pPde,true);
		if(!pPte)
			{
			// out of memory...
			MmuLock::Unlock();
			FreePermanentPageTables(pStartPde,pPde-1);
			return KErrNoMemory;
			}
		MmuLock::Unlock();

		addr += KChunkSize;
		++pPde;
		}

	TRACE2(("DFineMapping[0x%08x]::AllocatePermanentPageTables() done",this));
	Flags() |= DMemoryMapping::EPageTablesAllocated;
	return KErrNone;
	}
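
// Permanent page tables back EMappingCreateReserveAllResources mappings:
// every page table covering the allocated virtual range is allocated up
// front and ref-counted via IncPermanenceCount(), so later MapPages() calls
// can never fail for want of a page table. The PDE range above is simply the
// chunks spanned by the allocation, e.g. (illustrative, 1MB chunks):
//
//	// addr=0x400C0000, size=0x180000 -> last byte 0x4023FFFF,
//	// so chunks 0x400..0x402 are spanned and 3 page tables are allocated.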
       

void DFineMapping::FreePermanentPageTables(TPde* aFirstPde, TPde* aLastPde)
	{
	Flags() &= ~DMemoryMapping::EPageTablesAllocated;

	MmuLock::Lock();

	TUint flash = 0;
	TPde* pPde = aFirstPde;
	while(pPde<=aLastPde)
		{
		TPte* pPte = Mmu::PageTableFromPde(*pPde);
		__NK_ASSERT_DEBUG(pPte);
		SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pPte);
		if(pti->DecPermanenceCount() || pti->PageCount())
			{
			// still in use...
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo*2);
			}
		else
			{
			// page table no longer used for anything...
			MmuLock::Unlock();
			FreePageTable(pPde);
			MmuLock::Lock();
			}

		++pPde;
		}

	MmuLock::Unlock();
	}


void DFineMapping::FreePermanentPageTables()
	{
	if((Flags()&EPageTablesAllocated)==0)
		return;

	TRACE2(("DFineMapping[0x%08x]::FreePermanentPageTables()",this));

	TLinAddr addr = iAllocatedLinAddrAndOsAsid&~KPageMask;
	TInt osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
	TPde* pPde = Mmu::PageDirectoryEntry(osAsid,addr);
	TPde* pEndPde = Mmu::PageDirectoryEntry(osAsid,addr+iAllocatedSize-1);
	FreePermanentPageTables(pPde,pEndPde);
	}


TPte* DFineMapping::FindPageTable(TLinAddr aLinAddr, TUint aMemoryIndex)
	{
	TRACE(("DFineMapping::FindPageTable(0x%x, %d)", aLinAddr, aMemoryIndex));
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(IsAttached());
	return GetPageTable(aLinAddr);
	}



//
// DPhysicalPinMapping
//

DPhysicalPinMapping::DPhysicalPinMapping()
	: DMemoryMappingBase(EPinned|EPhysicalPinningMapping)
	{
	}


TInt DPhysicalPinMapping::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	__NK_ASSERT_ALWAYS(IsAttached());

	__NK_ASSERT_ALWAYS(TUint(aIndex+aCount)>aIndex && TUint(aIndex+aCount)<=iSizeInPages);
	aIndex += iStartIndex;

	DCoarseMemory* memory = (DCoarseMemory*)Memory(true); // safe because we should only be called whilst memory is Pinned
	TInt r = memory->PhysAddr(aIndex,aCount,aPhysicalAddress,aPhysicalPageList);
	if(r!=KErrNone)
		return r;

	if(memory->IsDemandPaged() && !IsReadOnly())
		{
		// the memory is demand paged and writeable so we need to mark it as dirty
		// as we have to assume that the memory will be modified via the physical
		// addresses we return...
		MmuLock::Lock();
		TPhysAddr* pages = aPhysicalPageList;
		TUint count = aCount;
		while(count)
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(*(pages++));
			pi->SetDirty();
			if((count&(KMaxPageInfoUpdatesInOneGo-1))==0)
				MmuLock::Flash(); // flash lock every KMaxPageInfoUpdatesInOneGo iterations of the loop
			--count;
			}
		MmuLock::Unlock();
		}

	return KErrNone;
	}


TInt DPhysicalPinMapping::Pin(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions)
	{
	PteType() = Mmu::PteType(aPermissions,true);
	return Attach(aMemory,aIndex,aCount);
	}


void DPhysicalPinMapping::Unpin()
	{
	Detach();
	}


TInt DPhysicalPinMapping::MapPages(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// shouldn't ever be called because these mappings are always pinned...
	__NK_ASSERT_DEBUG(0);
	return KErrNotSupported;
	}


void DPhysicalPinMapping::UnmapPages(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	}


void DPhysicalPinMapping::RemapPage(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/, TUint /*aMapInstanceCount*/, TBool /*aInvalidateTLB*/)
	{
	// shouldn't ever be called because physically pinned mappings block page moving.
	__NK_ASSERT_DEBUG(0);
	}


void DPhysicalPinMapping::RestrictPagesNA(RPageArray::TIter /*aPages*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	}


TInt DPhysicalPinMapping::PageIn(RPageArray::TIter /*aPages*/, TPinArgs& /*aPinArgs*/, TUint /*aMapInstanceCount*/)
	{
	// nothing to do...
	return KErrNone;
	}


TInt DPhysicalPinMapping::MovingPageIn(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/)
	{
	// Should never be asked to page in a page that is being moved as physical
	// pin mappings don't own any page tables.
	__NK_ASSERT_DEBUG(0);
	return KErrAbort;
	}

TInt DPhysicalPinMapping::DoMap()
	{
	// nothing to do...
	return KErrNone;
	}


void DPhysicalPinMapping::DoUnmap()
	{
	// nothing to do...
	}



//
// DVirtualPinMapping
//

DVirtualPinMapping::DVirtualPinMapping()
	: iMaxCount(0)
	{
	// Clear flag so it is possible to distinguish between virtual and physical pin mappings.
	Flags() &= ~EPhysicalPinningMapping;
	}


DVirtualPinMapping::~DVirtualPinMapping()
	{
	TRACE(("DVirtualPinMapping[0x%08x]::~DVirtualPinMapping()",this));
	FreePageTableArray();
	}


DVirtualPinMapping* DVirtualPinMapping::New(TUint aMaxCount)
	{
	TRACE(("DVirtualPinMapping::New(0x%x)",aMaxCount));
	DVirtualPinMapping* self = new DVirtualPinMapping;
	if(aMaxCount)
		{
		// pages have been reserved for our use.

		// Create the array for storing pinned page tables now, so we
		// don't risk out-of-memory errors trying to do so later...
		if(self->AllocPageTableArray(aMaxCount)!=KErrNone)
			{
			// failed, so cleanup...
			self->Close();
			self = 0;
			}
		else
			{
			// success, so remember the pages that have been reserved for us...
			self->iMaxCount = aMaxCount;
			self->Flags() |= EPinningPagesReserved;
			}
		}
	TRACE(("DVirtualPinMapping::New(0x%x) returns 0x%08x",aMaxCount,self));
	return self;
	}


TUint DVirtualPinMapping::MaxPageTables(TUint aPageCount)
	{
	return (aPageCount+2*KChunkSize/KPageSize-2)>>(KChunkShift-KPageShift);
	}
       
  1402 
       
  1403 
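        // Storage for pointers to the pinned page tables: the inline array
        // iSmallPinnedPageTablesArray is used when at most
        // KSmallPinnedPageTableCount page tables may be needed; a heap array
        // is allocated only for larger pin requests, and PageTableArray()
        // below returns whichever is in use.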
       
  1404 TInt DVirtualPinMapping::AllocPageTableArray(TUint aCount)
       
  1405 	{
       
  1406 	__NK_ASSERT_ALWAYS(iAllocatedPinnedPageTables==0);
       
  1407 	TUint maxPt	= MaxPageTables(aCount);
       
  1408 	if(maxPt>KSmallPinnedPageTableCount)
       
  1409 		{
       
  1410 		iAllocatedPinnedPageTables = new TPte*[maxPt];
       
  1411 		if(!iAllocatedPinnedPageTables)
       
  1412 			return KErrNoMemory;
       
  1413 		}
       
  1414 	return KErrNone;
       
  1415 	}
       
  1416 
       
  1417 
       
  1418 void DVirtualPinMapping::FreePageTableArray()
       
  1419 	{
       
  1420 	delete [] iAllocatedPinnedPageTables;
       
  1421 	iAllocatedPinnedPageTables = 0;
       
  1422 	}
       
  1423 
       
  1424 
       
  1425 TPte** DVirtualPinMapping::PageTableArray()
       
  1426 	{
       
  1427 	return iAllocatedPinnedPageTables ? iAllocatedPinnedPageTables : iSmallPinnedPageTablesArray;
       
  1428 	}
       
  1429 
       
  1430 
       
  1431 TInt DVirtualPinMapping::Pin(	DMemoryObject* aMemory, TUint aIndex, TUint aCount, TMappingPermissions aPermissions, 
       
  1432 								DMemoryMappingBase* aMapping, TUint aMappingInstanceCount)
       
  1433 	{
       
  1434 	// Virtual pinning ensures a page is always mapped to a particular virtual address
       
   1435 	// and therefore requires a non-pinning mapping of the virtual address to pin.
       
  1436 	__NK_ASSERT_ALWAYS(aMapping && !aMapping->IsPinned());
       
  1437 
       
  1438 	if(iMaxCount)
       
  1439 		{
       
  1440 		if(aCount>iMaxCount)
       
  1441 			return KErrArgument;
       
  1442 		}
       
  1443 	else
       
  1444 		{
       
  1445 		TInt r = AllocPageTableArray(aCount);
       
  1446 		if(r!=KErrNone)
       
  1447 			return r;
       
  1448 		}
       
  1449 
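        	// Make the non-pinning mapping visible to PageIn() (see below) for
        	// the duration of the pin, so pages paged in while pinning are also
        	// mapped at their existing virtual address...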
       
  1450 	iPinVirtualMapping = aMapping;
       
  1451 	iPinVirtualMapInstanceCount = aMappingInstanceCount;
       
  1452 	TInt r = DPhysicalPinMapping::Pin(aMemory,aIndex,aCount,aPermissions);
       
  1453 	iPinVirtualMapping = 0;
       
  1454 
       
  1455 	return r;
       
  1456 	}
       
  1457 
       
  1458 
       
  1459 void DVirtualPinMapping::Unpin()
       
  1460 	{
       
  1461 	Detach();
       
  1462 	}
       
  1463 
       
  1464 
       
  1465 void DVirtualPinMapping::UnpinPageTables(TPinArgs& aPinArgs)
       
  1466 	{
       
  1467 	TPte** pPt = PageTableArray();
       
  1468 	TPte** pPtEnd = pPt+iNumPinnedPageTables;
       
  1469 
       
  1470 	MmuLock::Lock();
       
  1471 	while(pPt<pPtEnd)
       
  1472 		PageTableAllocator::UnpinPageTable(*pPt++,aPinArgs);
       
  1473 	MmuLock::Unlock();
       
  1474 	iNumPinnedPageTables = 0;
       
  1475 
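        	// the array was demand-allocated in Pin() unless pages were reserved
        	// at creation time (iMaxCount!=0), so free it in the former case...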
       
  1476 	if(!iMaxCount)
       
  1477 		FreePageTableArray();
       
  1478 	}
       
  1479 
       
  1480 
       
  1481 void DVirtualPinMapping::RemapPage(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/, TUint /*aMapInstanceCount*/, TBool /*aInvalidateTLB*/)
       
  1482 	{
       
  1483 	__NK_ASSERT_DEBUG(0);
       
  1484 	}
       
  1485 
       
  1486 
       
  1487 TInt DVirtualPinMapping::PageIn(RPageArray::TIter aPages, TPinArgs& aPinArgs, TUint aMapInstanceCount)
       
  1488 	{
       
  1489 	if(iPinVirtualMapping)
       
  1490 		return iPinVirtualMapping->PageIn(aPages, aPinArgs, iPinVirtualMapInstanceCount);
       
  1491 	return KErrNone;
       
  1492 	}
       
  1493 
       
  1494 
       
  1495 TInt DVirtualPinMapping::MovingPageIn(TPhysAddr& /*aPageArrayPtr*/, TUint /*aIndex*/)
       
  1496 	{
       
   1497 	// Should never be asked to page in a page that is being moved, as virtual 
        
   1498 	// pin mappings don't own any page tables.
       
  1499 	__NK_ASSERT_DEBUG(0);
       
  1500 	return KErrAbort;
       
  1501 	}
       
  1502 
       
  1503 
       
  1504 TInt DVirtualPinMapping::DoPin(TPinArgs& aPinArgs)
       
  1505 	{
       
  1506 	// setup for page table pinning...
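        	// (the pinning code records each page table it pins into this array
        	// and counts them in aPinArgs.iNumPinnedPageTables)...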
       
  1507 	aPinArgs.iPinnedPageTables = PageTableArray();
       
  1508 
       
  1509 	// do pinning...
       
  1510 	TInt r = DPhysicalPinMapping::DoPin(aPinArgs);
       
  1511 
       
  1512 	// save results...
       
  1513 	iNumPinnedPageTables = aPinArgs.iNumPinnedPageTables;
       
  1514 	__NK_ASSERT_DEBUG(iNumPinnedPageTables<=MaxPageTables(iSizeInPages));
       
  1515 
       
  1516 	// cleanup if error...
       
  1517 	if(r!=KErrNone)
       
  1518 		UnpinPageTables(aPinArgs);
       
  1519 
       
  1520 	return r;
       
  1521 	}
       
  1522 
       
  1523 
       
  1524 void DVirtualPinMapping::DoUnpin(TPinArgs& aPinArgs)
       
  1525 	{
       
  1526 	DPhysicalPinMapping::DoUnpin(aPinArgs);
       
  1527 	UnpinPageTables(aPinArgs);
       
  1528 	}
       
  1529 
       
  1530 
       
  1531 
       
  1532 //
       
  1533 // DMemoryMappingBase
       
  1534 //
       
  1535 
       
  1536 
       
  1537 DMemoryMappingBase::DMemoryMappingBase(TUint aType)
       
  1538 	{
       
  1539 	Flags() = aType; // rest of members cleared by DBase
       
  1540 	}
       
  1541 
       
  1542 
       
  1543 TInt DMemoryMappingBase::Attach(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
       
  1544 	{
       
  1545 	TRACE(("DMemoryMappingBase[0x%08x]::Attach(0x%08x,0x%x,0x%x)",this,aMemory,aIndex,aCount));
       
  1546 	__NK_ASSERT_DEBUG(!IsAttached());
       
  1547 	TInt r;
       
  1548 
       
  1549 	if(++iMapInstanceCount>1)
       
  1550 		{// This mapping is being reused...
       
  1551 
       
   1552 		// Non-pinned mappings can be reused, but this is only exercised 
        
   1553 		// by aligned shared buffers whose memory is managed by the unpaged 
        
   1554 		// or hardware memory manager.  Reusing mappings to paged or movable 
        
   1555 		// memory hasn't been tested; it would require the interactions between 
        
   1556 		// mapping reuse, the fault handler, pinning etc to be tested.
       
  1557 		__NK_ASSERT_DEBUG(	IsPinned() ||
       
  1558 							aMemory->iManager == TheUnpagedMemoryManager || 
       
  1559 							aMemory->iManager == TheHardwareMemoryManager);
       
  1560 
       
   1561 		// make sure the new instance count is seen by other threads which may be
        
   1562 		// operating on the old mapping instance (this will stop them changing the mapping any more)...
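        		// (an empty MmuLock Lock/Unlock pair acts as a barrier here: any
        		// thread that sampled the old instance count while holding the lock
        		// has released it before we continue)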
       
  1563 		MmuLock::Lock();
       
  1564 		MmuLock::Unlock();
       
  1565 		// clear unmapping flag from previous use...
       
  1566 		__e32_atomic_and_ord16(&Flags(), (TUint16)~(EDetaching|EPageUnmapVetoed));
       
  1567 		}
       
  1568 
       
  1569 	__NK_ASSERT_DEBUG((Flags()&(EDetaching|EPageUnmapVetoed))==0);
       
  1570 
       
  1571 	// set region being mapped...
       
  1572 	iStartIndex = aIndex;
       
  1573 	iSizeInPages = aCount;
       
  1574 
       
  1575 	// reserve any pages required for pinning demand paged memory.
       
  1576 	// We must do this before we add the mapping to the memory object
       
  1577 	// because once that is done the pages we are mapping will be prevented
       
  1578 	// from being paged out. That could leave the paging system without
       
  1579 	// enough pages to correctly handle page faults...
       
  1580 	TPinArgs pinArgs;
       
  1581 	pinArgs.iReadOnly = IsReadOnly();
       
  1582 	if(IsPinned() && aMemory->IsDemandPaged())
       
  1583 		{
       
  1584 		pinArgs.iUseReserve = Flags()&EPinningPagesReserved;
       
  1585 		r = pinArgs.AllocReplacementPages(aCount);
       
  1586 		if(r!=KErrNone)
       
  1587 			return r;
       
  1588 		}
       
  1589 
       
  1590 	// link into memory object...
       
  1591 	r = aMemory->AddMapping(this);
       
  1592 	if(r==KErrNone)
       
  1593 		{
       
  1594 		// pin pages if needed...
       
  1595 		if(IsPinned())
       
  1596 			r = DoPin(pinArgs);
       
  1597 
       
  1598 		// add pages to this mapping...
       
  1599 		if(r==KErrNone)
       
  1600 			r = DoMap();
       
  1601 
       
  1602 		// revert if error...
       
  1603 		if(r!=KErrNone)
       
  1604 			Detach();
       
  1605 		}
       
  1606 
       
  1607 	// free any left over pinning pages...
       
  1608 	pinArgs.FreeReplacementPages();
       
  1609 
       
  1610 	return r;
       
  1611 	}
       
  1612 
       
  1613 
       
  1614 void DMemoryMappingBase::Detach()
       
  1615 	{
       
  1616 	TRACE(("DMemoryMappingBase[0x%08x]::Detach()",this));
       
  1617 	__NK_ASSERT_DEBUG(IsAttached());
       
  1618 
       
  1619 	// set EDetaching flag, which prevents anyone modifying pages in this
       
  1620 	// mapping, except to remove them...
       
  1621 	MmuLock::Lock();
       
  1622 	__e32_atomic_ior_ord16(&Flags(), (TUint16)EDetaching);
       
  1623 	MmuLock::Unlock();
       
  1624 
       
  1625 	// remove all pages from this mapping...
       
  1626 	DoUnmap();
       
  1627 
       
  1628 	// unpin pages if needed...
       
  1629 	TPinArgs pinArgs;
       
  1630 	if(IsPinned())
       
  1631 		DoUnpin(pinArgs);
       
  1632 
       
  1633 	// unlink from memory object...
       
  1634 	iMemory->RemoveMapping(this);
       
  1635 
       
  1636 	// free any spare pages produced by unpinning...
       
  1637 	pinArgs.FreeReplacementPages();
       
  1638 	}
       
  1639 
       
  1640 
       
  1641 TInt DMemoryMappingBase::DoPin(TPinArgs& aPinArgs)
       
  1642 	{
       
  1643 	DMemoryObject* memory = Memory(true); // safe because we're called from code which has added mapping to memory
       
  1644 	return memory->iManager->Pin(memory,this,aPinArgs);
       
  1645 	}
       
  1646 
       
  1647 
       
  1648 void DMemoryMappingBase::DoUnpin(TPinArgs& aPinArgs)
       
  1649 	{
       
  1650 	DMemoryObject* memory = Memory(true); // safe because we're called from code which will be removing this mapping from memory afterwards
       
  1651 	memory->iManager->Unpin(memory,this,aPinArgs);
       
  1652 	}
       
  1653 
       
  1654 
       
  1655 void DMemoryMappingBase::LinkToMemory(DMemoryObject* aMemory, TMappingList& aMappingList)
       
  1656 	{
       
  1657 	TRACE(("DMemoryMappingBase[0x%08x]::LinkToMemory(0x%08x,?)",this,aMemory));
       
  1658 	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
       
  1659 	__NK_ASSERT_DEBUG(aMappingList.LockIsHeld());
       
  1660 	__NK_ASSERT_ALWAYS(!IsAttached());
       
  1661 	__NK_ASSERT_DEBUG(!BeingDetached());
       
  1662 	aMappingList.Add(this);
       
  1663 	iMemory = aMemory;
       
  1664 	iMemory->SetMappingAddedFlag();
       
  1665 	}
       
  1666 
       
  1667 
       
  1668 void DMemoryMappingBase::UnlinkFromMemory(TMappingList& aMappingList)
       
  1669 	{
       
   1670 	TRACE(("DMemoryMappingBase[0x%08x]::UnlinkFromMemory(?)",this));
       
  1671 
       
  1672 	// unlink...
       
  1673 	MmuLock::Lock();
       
  1674 	aMappingList.Lock();
       
  1675 	__NK_ASSERT_DEBUG(IsAttached());
       
  1676 	__NK_ASSERT_DEBUG(BeingDetached());
       
  1677 	aMappingList.Remove(this);
       
  1678 	DMemoryObject* memory = iMemory;
       
  1679 	iMemory = 0;
       
  1680 	aMappingList.Unlock();
       
  1681 	MmuLock::Unlock();
       
  1682 
       
  1683 	// if mapping had vetoed any page decommits...
       
  1684 	if(Flags()&DMemoryMapping::EPageUnmapVetoed)
       
  1685 		{
       
  1686 		// then queue cleanup of decommitted pages...
       
  1687 		memory->iManager->QueueCleanup(memory,DMemoryManager::ECleanupDecommitted);
       
  1688 		}
       
  1689 	}
       
  1690 
       
  1691 
       
  1692 
       
  1693 //
       
  1694 // Debug
       
  1695 //
       
  1696 
       
  1697 void DMemoryMappingBase::Dump()
       
  1698 	{
       
  1699 #ifdef _DEBUG
       
  1700 	Kern::Printf("DMemoryMappingBase[0x%08x]::Dump()",this);
       
  1701 	Kern::Printf("  IsAttached() = %d",(bool)IsAttached());
       
  1702 	Kern::Printf("  iMemory = 0x%08x",iMemory);
       
  1703 	Kern::Printf("  iStartIndex = 0x%x",iStartIndex);
       
  1704 	Kern::Printf("  iSizeInPages = 0x%x",iSizeInPages);
       
  1705 	Kern::Printf("  Flags() = 0x%x",Flags());
       
  1706 	Kern::Printf("  PteType() = 0x%x",PteType());
       
  1707 #endif // _DEBUG
       
  1708 	}
       
  1709 
       
  1710 
       
  1711 void DMemoryMapping::Dump()
       
  1712 	{
       
  1713 #ifdef _DEBUG
       
  1714 	Kern::Printf("DMemoryMapping[0x%08x]::Dump()",this);
       
   1715 	Kern::Printf("  Base() = 0x%08x",iLinAddrAndOsAsid&~KPageMask);
       
  1716 	Kern::Printf("  OsAsid() = %d",iLinAddrAndOsAsid&KPageMask);
       
  1717 	Kern::Printf("  iBlankPde = 0x%08x",iBlankPde);
       
  1718 	Kern::Printf("  iBlankPte = 0x%08x",iBlankPte);
       
  1719 	Kern::Printf("  iAllocatedLinAddrAndOsAsid = 0x%08x",iAllocatedLinAddrAndOsAsid);
       
  1720 	Kern::Printf("  iAllocatedSize = 0x%x",iAllocatedSize);
       
  1721 	DMemoryMappingBase::Dump();
       
  1722 #endif // _DEBUG
       
  1723 	}
       
  1724 
       
  1725 
       
  1726 void DVirtualPinMapping::Dump()
       
  1727 	{
       
  1728 #ifdef _DEBUG
       
  1729 	Kern::Printf("DVirtualPinMapping[0x%08x]::Dump()",this);
       
  1730 	Kern::Printf("  iMaxCount = %d",iMaxCount);
       
  1731 	Kern::Printf("  iNumPinnedPageTables = %d",iNumPinnedPageTables);
       
  1732 	DMemoryMappingBase::Dump();
       
  1733 #endif // _DEBUG
       
  1734 	}
       
  1735