kernel/eka/memmodel/epoc/flexible/mmu/mmapping.cpp
changeset 31 56f325a607ea
parent 0 a41df078684a
child 33 0173bcd7697c
15:4122176ea935 31:56f325a607ea
@@ -102,26 +102,34 @@
 			return KErrArgument;
 		if((aCount|aIndex)&(KChunkMask>>KPageShift))
 			return KErrArgument;
 		}
 
-	TLinAddr base = iAllocatedLinAddrAndOsAsid&~KPageMask;
-#ifdef _DEBUG
-	TUint osAsid = iAllocatedLinAddrAndOsAsid&KPageMask;
-#endif
+	TLinAddr base = iAllocatedLinAddrAndOsAsid & ~KPageMask;
+	TLinAddr top = base + (aCount << KPageShift);
 
 	// check user/supervisor memory partitioning...
-	if(base<KUserMemoryLimit != (bool)(aPermissions&EUser))
-		return KErrAccessDenied;
-
-	// check mapping doesn't straddle KGlobalMemoryBase or KUserMemoryLimit...
-	__NK_ASSERT_DEBUG(TUint(KGlobalMemoryBase-base)==0 || TUint(KGlobalMemoryBase-base)>=TUint(aCount<<KPageShift));
-	__NK_ASSERT_DEBUG(TUint(KUserMemoryLimit-base)==0 || TUint(KUserMemoryLimit-base)>=TUint(aCount<<KPageShift));
+	if (aPermissions & EUser)
+		{
+		if (base > KUserMemoryLimit || top > KUserMemoryLimit)
+			return KErrAccessDenied;
+		}
+	else
+		{
+		if (base < KUserMemoryLimit || top < KUserMemoryLimit)
+			return KErrAccessDenied;
+		}
+
+	// check that mapping doesn't straddle KUserMemoryLimit or KGlobalMemoryBase ...
+	__NK_ASSERT_DEBUG((base < KUserMemoryLimit) == (top <= KUserMemoryLimit));
+	__NK_ASSERT_DEBUG((base < KGlobalMemoryBase) == (top <= KGlobalMemoryBase));
+
+	// check that only global memory is mapped into the kernel process
+	TBool global = base >= KGlobalMemoryBase;
+	__NK_ASSERT_DEBUG(global || (iAllocatedLinAddrAndOsAsid & KPageMask) != KKernelOsAsid);
 
 	// setup attributes...
-	TBool global = base>=KGlobalMemoryBase;
-	__NK_ASSERT_DEBUG(global || osAsid!=(TInt)KKernelOsAsid); // prevent non-global memory in kernel process
 	PteType() =	Mmu::PteType(aPermissions,global);
 	iBlankPte = Mmu::BlankPte(aMemory->Attributes(),PteType());
 
 	// setup base address... 
 	TUint colourOffset = ((aIndex&KPageColourMask)<<KPageShift);
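
The hunk above replaces the old combined expression with an explicit user/supervisor branch that checks both ends of the mapping against KUserMemoryLimit. A minimal standalone sketch of the same rule, mirroring the accepted/rejected cases exactly (the value given for KUserMemoryLimit is illustrative only, not the real constant):

	typedef unsigned long TLinAddr;                 // assumed 32-bit target, as in the kernel sources
	const TLinAddr KUserMemoryLimit = 0x80000000u;  // illustrative value only

	// User mappings must lie wholly below the limit, supervisor mappings
	// wholly at or above it (top is the end address of the mapping).
	bool PartitionOk(TLinAddr base, TLinAddr top, bool user)
		{
		if (user)
			return base <= KUserMemoryLimit && top <= KUserMemoryLimit;
		return base >= KUserMemoryLimit && top >= KUserMemoryLimit;
		}

Once this holds, the straddle asserts that follow reduce to requiring both ends of the mapping to sit on the same side of each boundary: (base < KUserMemoryLimit) == (top <= KUserMemoryLimit), and likewise for KGlobalMemoryBase.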
@@ -283,10 +291,14 @@
 			__NK_ASSERT_DEBUG(*pPde==KPdeUnallocatedEntry);
 			}
 		else
 			{
 			TPde pde = Mmu::PageTablePhysAddr(pt)|iBlankPde;
+#ifdef	__USER_MEMORY_GUARDS_ENABLED__
+			if (IsUserMapping())
+				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
+#endif
 			TRACE2(("!PDE %x=%x (was %x)",pPde,pde,*pPde));
 			if (Mmu::PdeMapsSection(*pPde))
 				{
 				// break previous section mapping...
 				__NK_ASSERT_DEBUG(*pPde==Mmu::PageToSectionEntry(pt[0],iBlankPde));
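
This hunk (and the identical one in the next hunk) tags the page directory entry with the user memory domain when user memory guards are compiled in, so the kernel can shut off user memory via domain access control while running kernel code. PDE_IN_DOMAIN and USER_MEMORY_DOMAIN come from the ARM-specific headers and are not part of this change; the following is only a plausible sketch of the operation, assuming the usual ARM first-level descriptor layout with the domain number in bits [8:5]:

	typedef unsigned long TPde;  // assumed: 32-bit first-level descriptor

	// Assumed layout: 4-bit domain field at bits [8:5] of the descriptor.
	const unsigned KPdeDomainShift = 5;
	const TPde KPdeDomainMask = TPde(0xf) << KPdeDomainShift;

	// Replace the descriptor's domain field with aDomain.
	inline TPde PdeInDomain(TPde aPde, unsigned aDomain)
		{
		return (aPde & ~KPdeDomainMask) | (TPde(aDomain & 0xf) << KPdeDomainShift);
		}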
@@ -626,10 +638,14 @@
 			// setup new page table...
 			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(newPt);
 			pti->SetFine(aAddr&~KChunkMask,iAllocatedLinAddrAndOsAsid&KPageMask);
 
 			TPde pde = Mmu::PageTablePhysAddr(newPt)|iBlankPde;
+#ifdef	__USER_MEMORY_GUARDS_ENABLED__
+			if (IsUserMapping())
+				pde = PDE_IN_DOMAIN(pde, USER_MEMORY_DOMAIN);
+#endif
 			TRACE2(("!PDE %x=%x",aPdeAddress,pde));
 			__NK_ASSERT_DEBUG(((*aPdeAddress^pde)&~KPdeMatchMask)==0 || *aPdeAddress==KPdeUnallocatedEntry);
 			*aPdeAddress = pde;
 			SinglePdeUpdated(aPdeAddress);
 
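
The same domain guard is applied here when a fresh page table is installed. Note the debug assert guarding the write: a PDE slot may only be (re)written if it was unallocated, or if the new entry differs from the old one only in the bits covered by KPdeMatchMask. As a standalone illustration of that invariant (the constants here are placeholders, not the real values):

	typedef unsigned long TPde;           // assumed: 32-bit first-level descriptor
	const TPde KPdeUnallocatedEntry = 0;  // placeholder value
	const TPde KPdeMatchMask = 0x3ff;     // placeholder mask

	// True when writing aNew over aOld keeps the invariant asserted above:
	// the slot was unallocated, or only bits inside KPdeMatchMask change.
	inline bool PdeUpdateIsSafe(TPde aOld, TPde aNew)
		{
		return ((aOld ^ aNew) & ~KPdeMatchMask) == 0 || aOld == KPdeUnallocatedEntry;
		}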