kernel/eka/memmodel/epoc/flexible/mcodeseg.cpp
changeset 0 a41df078684a
child 4 56f325a607ea
equal deleted inserted replaced
-1:000000000000 0:a41df078684a
       
     1 // Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
       
     2 // All rights reserved.
       
     3 // This component and the accompanying materials are made available
       
     4 // under the terms of the License "Eclipse Public License v1.0"
       
     5 // which accompanies this distribution, and is available
       
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
       
     7 //
       
     8 // Initial Contributors:
       
     9 // Nokia Corporation - initial contribution.
       
    10 //
       
    11 // Contributors:
       
    12 //
       
    13 // Description:
       
    14 //
       
    15 
       
    16 #include <memmodel.h>
       
    17 #include "mmu/mm.h"
       
    18 #include "mmboot.h"
       
    19 #include "mmu/mcodepaging.h"
       
    20 
       
    21 #include "cache_maintenance.h"
       
    22 
       
    23 
       
    24 DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
       
    25 	{
       
    26 	__KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
       
    27 	return new DMemModelCodeSeg;
       
    28 	}
       
    29 
       
    30 
       
    31 //
       
    32 // DMemModelCodeSegMemory
       
    33 //
       
    34 
       
    35 DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
       
    36 	{
       
    37 	return new DMemModelCodeSegMemory(aCodeSeg);
       
    38 	}
       
    39 
       
    40 
       
// Construct the code-segment memory object; all real setup (virtual address
// allocation, memory objects, mappings) is deferred to Create().
DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
	: DEpocCodeSegMemory(aCodeSeg)
	{
	}
       
    45 
       
    46 
       
/**
Allocate the resources for a RAM-loaded, user-local code segment: a run
virtual address, the code memory object (paged or fully committed), and a
writable mapping in the loader's (current) process for it to fill.

@param aInfo	Loader-supplied creation info; iUseCodePaging selects between
				a demand-paged and a fully-committed code segment.
@param aProcess	For an EXE, the process the code will run in; the virtual
				allocation is recorded in it so it can be freed later.

@return KErrNone on success, otherwise a system-wide error code. On failure,
		anything already constructed is released by Destroy()/the destructor.
*/
TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo, DMemModelProcess* aProcess)
	{
	TInt r;

	TUint codePageCount;
	TUint dataPageCount;
	TBool isDemandPaged;
	if(!aInfo.iUseCodePaging)
		{
		isDemandPaged = 0;
		// not paged: code and initial data share a single committed allocation
		codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize+iRamInfo.iDataSize);
		dataPageCount = 0;
		}
	else
		{
		isDemandPaged = 1;
		codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize);
		dataPageCount = MM::RoundToPageCount(iRamInfo.iDataSize);

		// demand paged: keep a kernel-side copy of the initial data section
		// (filled in by Loaded()), since loaded pages may later be discarded
		iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
		if(!iDataSectionMemory)
			return KErrNoMemory;
		}

	iCodeSeg->iSize = codePageCount<<KPageShift;

	// allocate virtual address for code to run at...
	const TUint codeSize = codePageCount<<KPageShift;
	if(iCodeSeg->IsExe())
		{// Get the os asid without opening a reference on it as aProcess isn't fully 
		// created yet so won't free its os asid.
		r = MM::VirtualAlloc(aProcess->OsAsid(),iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
		if(r!=KErrNone)
			return r;
		// record the allocation in the process so it can be freed with it
		aProcess->iCodeVirtualAllocSize = codeSize;
		aProcess->iCodeVirtualAllocAddress = iRamInfo.iCodeRunAddr;
		iCodeSeg->iAttr |= ECodeSegAttAddrNotUnique;
		}
	else
		{
		// DLL: allocate in the common (shared) virtual address region
		r = MM::VirtualAllocCommon(iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
		if(r!=KErrNone)
			return r;
		iVirtualAllocCommonSize = codeSize;
		}

	// create memory object for codeseg...
	if(isDemandPaged)
		{
		// create memory object...
		r = MM::PagedCodeNew(iCodeMemoryObject, codePageCount, iPagedCodeInfo);
		if(r!=KErrNone)
			return r;

		// get file blockmap for codeseg contents...
		r = iPagedCodeInfo->ReadBlockMap(aInfo);
		if (r != KErrNone)
			return r;
		}
	else
		{
		// create memory object...
		// NoWipe is safe because the loader fills it (or it is wiped below)
		TMemoryCreateFlags flags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateAllowExecution);
		r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, codePageCount, flags);
		if(r!=KErrNone)
			return r;

		// commit memory...
		r = MM::MemoryAlloc(iCodeMemoryObject,0,codePageCount);
		if(r!=KErrNone)
			return r;
		}

	// create a mapping of the memory for the loader...
	// No need to open reference on os asid it is the current thread/process's.
	DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
	if(r!=KErrNone)
		return r;

	iRamInfo.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);

	// work out where the loader is to put the loaded data section...
	TInt loadSize = iRamInfo.iCodeSize; // size of memory filled by loader
	if(iRamInfo.iDataSize)
		{
		if(!dataPageCount)
			{
			// data loaded immediately after code...
			iRamInfo.iDataLoadAddr = iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
			loadSize += iRamInfo.iDataSize;
			}
		else
			{
			// demand-paged case: data gets its own temporary memory object,
			// destroyed again when the loader is unmapped in Loaded()
			// create memory object for data...
			DMemoryObject* dataMemory;
			r = MM::MemoryNew(dataMemory, EMemoryObjectMovable, dataPageCount, EMemoryCreateNoWipe);
			if(r!=KErrNone)
				return r;

			// commit memory...
			r = MM::MemoryAlloc(dataMemory,0,dataPageCount);
			if(r==KErrNone)
				{
				// create a mapping of the memory for the loader...
				// No need to open reference on os asid it is the current thread/process's.
				r = MM::MappingNew(iDataLoadMapping,dataMemory,EUserReadWrite,pP->OsAsid());
				}

			if(r!=KErrNone)
				{
				// dataMemory is not yet owned by iDataLoadMapping, free it here
				MM::MemoryDestroy(dataMemory);
				return r;
				}

			iRamInfo.iDataLoadAddr = MM::MappingBase(iDataLoadMapping);
			}
		}

	if(!isDemandPaged)
		{
		// wipe memory that the loader wont fill...
		// (memory was created with NoWipe; 0x03 marks never-loaded bytes)
		UNLOCK_USER_MEMORY();
		memset((TAny*)(iRamInfo.iCodeLoadAddr+loadSize), 0x03, codeSize-loadSize);
		LOCK_USER_MEMORY();
		}

	// done...
	iCreator = pP;
	
	return KErrNone;
	}
       
   179 
       
   180 
       
/**
Finalise the code segment after the loader has filled it: apply paging fixup
info or flush caches, capture the initial data section and export directory,
then tear down the loader's mappings and make the code read-only.

Must be called from the same process that called Create() (the creator).

@param aInfo Loader-supplied info (fixup tables for demand-paged code).
@return KErrNone on success, otherwise a system-wide error code.
*/
TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if(iPagedCodeInfo)
		{
		// get information needed to fixup code for it's run address...
		TInt r = iPagedCodeInfo->ReadFixupTables(aInfo);
		if(r!=KErrNone)
			return r;
		MM::PagedCodeLoaded(iCodeMemoryObject, iRamInfo.iCodeLoadAddr);
		}
	else
		{
		// make code visible to instruction cache...
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
		LOCK_USER_MEMORY();
		}

	// adjust iDataLoadAddr to point to address contents for initial data section
	// in running process...
	if(iRamInfo.iDataLoadAddr)
		{
		TAny* dataSection = iDataSectionMemory;
		if(dataSection)
			{
			// contents for initial data section to be stored in iDataSectionMemory...
			// (demand-paged case: loaded pages may be discarded, so keep a copy)
			UNLOCK_USER_MEMORY();
			memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
			LOCK_USER_MEMORY();
			iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
			}
		else
			{
			// contents for initial data section stored after code...
			__NK_ASSERT_DEBUG(iRamInfo.iDataLoadAddr==iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize); // check data loaded at end of code
			iRamInfo.iDataLoadAddr = iRamInfo.iCodeRunAddr+iRamInfo.iCodeSize;
			}
		}

	// copy export directory (this will now have fixups applied)...
	TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
	if(exportDirSize > 0 || (exportDirSize==0 && (iCodeSeg->iAttr&ECodeSegAttNmdExpData)) )
		{
		// include the word immediately preceding the directory
		// (presumably the export count — see ReadExportDir)
		exportDirSize += sizeof(TLinAddr);
		TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
		if(!expDir)
			return KErrNoMemory;
		iCopyOfExportDir = expDir;
		// translate the run address of the export dir to its load address
		TLinAddr expDirLoad = iRamInfo.iExportDir-iRamInfo.iCodeRunAddr+iRamInfo.iCodeLoadAddr;
		memcpy(expDir,(TAny*)(expDirLoad-sizeof(TLinAddr)),exportDirSize);
		}

	// unmap code from loading process...
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	__ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	iCreator=NULL;

	// Mark the code memory object read only to prevent malicious code modifying it.
	TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
	__ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));

	return KErrNone;
	}
       
   246 
       
   247 
       
// Release the loader-side mappings early (e.g. when creation is abandoned);
// the remaining resources are freed by the destructor.
void DMemModelCodeSegMemory::Destroy()
	{
	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	}
       
   253 
       
   254 
       
// Free everything Create()/Loaded() may have allocated: mappings, the code
// memory object, the common virtual address range, and kernel-side copies.
// (An EXE's virtual allocation is recorded in the owning process instead —
// see Create() — so it is not freed here.)
DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
	__NK_ASSERT_DEBUG(iAccessCount==0);

	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingAndMemoryDestroy(iDataLoadMapping);
	MM::MemoryDestroy(iCodeMemoryObject);

	// only set for non-EXE code (see Create)
	if(iVirtualAllocCommonSize)
		MM::VirtualFreeCommon(iRamInfo.iCodeRunAddr, iVirtualAllocCommonSize);

	Kern::Free(iCopyOfExportDir);
	Kern::Free(iDataSectionMemory);
	}
       
   270 
       
   271 
       
   272 //
       
   273 // DMemModelCodeSeg
       
   274 //
       
   275 
       
// Construct an empty code segment; all members rely on zero-initialisation
// and are set up by DoCreateRam()/DoCreateXIP().
DMemModelCodeSeg::DMemModelCodeSeg()
	{
	}
       
   279 
       
   280 
       
// Destroy the code segment. Mapping/memory teardown is serialised under the
// global code-seg lock (DCodeSeg::Wait/Signal); kernel-side data and base
// class destruction happen after the lock is released.
DMemModelCodeSeg::~DMemModelCodeSeg()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
	DCodeSeg::Wait();

	MM::MappingDestroy(iCodeLoadMapping);
	MM::MappingDestroy(iCodeGlobalMapping);
	MM::MemoryDestroy(iCodeMemoryObject);

	// user-local code keeps its resources in the DCodeSegMemory object
	if(Memory())
		Memory()->Destroy();

	// free the common virtual address range allocated for the data section
	if(iDataAllocSize)
		MM::VirtualFreeCommon(iDataAllocBase,iDataAllocSize);

	DCodeSeg::Signal();

	Kern::Free(iKernelData);

	DEpocCodeSeg::Destruct();
	}
       
   302 
       
   303 
       
/**
Create the memory for a RAM-loaded (non-XIP) code segment.

User-local code delegates to DMemModelCodeSegMemory::Create(). Kernel and
user-global code get a memory object here, with a writable mapping for the
loader and a global executable mapping to run at.

@param aInfo	Loader-supplied creation info.
@param aProcess	The process the code segment is being created for.
@return KErrNone on success, KErrCorrupt for a zero-sized image, otherwise a
		system-wide error code.
*/
TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess* aProcess)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));

	SRamCodeInfo& ri = RamInfo();
	iSize = MM::RoundToPageSize(ri.iCodeSize+ri.iDataSize);
	if (iSize==0)
		return KErrCorrupt;

	// classify the segment from its attribute bits
	TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
//	TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );

	TUint total_data_size = ri.iDataSize+ri.iBssSize;

	if(user_local)
		{
		// setup paging attribute for code...
		if(aInfo.iUseCodePaging)
			iAttr |= ECodeSegAttCodePaged;

		// DLLs (not EXEs) with static data get a common-region data address
		if(total_data_size && !IsExe())
			{
			// setup paging attribute for data section...
			if(aInfo.iUseCodePaging)
				if(K::MemModelAttributes & EMemModelAttrDataPaging)
					iAttr |= ECodeSegAttDataPaged;

			// allocate virtual address for data section...
			TInt r = MM::VirtualAllocCommon(iDataAllocBase,total_data_size,iAttr&ECodeSegAttDataPaged);
			if(r!=KErrNone)
				return r;
			iDataAllocSize = total_data_size;
			ri.iDataRunAddr = iDataAllocBase;
			}

		// create DCodeSegMemory for RAM loaded user local code...
		TInt r = Memory()->Create(aInfo,(DMemModelProcess*)aProcess);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
		if (r == KErrNone)
			{
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,Memory()->iCodeMemoryObject,this);
			}
#endif
		
		return r;
		}

	// kernel or user-global code...

	// create memory object for codeseg...
	TMemoryCreateFlags flags = EMemoryCreateAllowExecution;
	if(kernel)
		{
		// kernel memory need not be wiped before use
		flags = (TMemoryCreateFlags)(flags|EMemoryCreateNoWipe);
		}
	TInt r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, MM::BytesToPages(iSize), flags);
	if(r!=KErrNone)
		return r;

	// commit memory...
	r = MM::MemoryAlloc(iCodeMemoryObject,0,MM::BytesToPages(iSize));
	if(r!=KErrNone)
		return r;

	// create a mapping of the memory for the loader...
	// No need to open reference on os asid it is the current thread/process's.
	DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
	if(r!=KErrNone)
		return r;
	ri.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);

	// create a global mapping of the memory for the codeseg to run at...
	r = MM::MappingNew(iCodeGlobalMapping,iCodeMemoryObject,kernel?ESupervisorExecute:EUserExecute,KKernelOsAsid);
	if(r!=KErrNone)
		return r;
	ri.iCodeRunAddr = MM::MappingBase(iCodeGlobalMapping);

	if(kernel)
		{
		// setup data section memory...
		if (ri.iDataSize)
			ri.iDataLoadAddr = ri.iCodeLoadAddr+ri.iCodeSize;
		if (total_data_size)
			{
			// kernel static data lives on the kernel heap
			iKernelData = Kern::Alloc(total_data_size);
			if (!iKernelData)
				return KErrNoMemory;
			ri.iDataRunAddr = (TLinAddr)iKernelData;
			}
		}
	else
		{
		// we don't allow static data in global code...
		ri.iDataLoadAddr = 0;
		ri.iDataRunAddr = 0;
		}

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
#endif

	// done...
	return KErrNone;
	}
       
   411 
       
   412 
       
// XIP code executes in place from ROM, so no RAM needs to be allocated;
// creation is a no-op in this memory model.
TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
	{
//	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateXIP %C proc %O", this, aProcess));
	return KErrNone;
	}
       
   418 
       
   419 
       
/**
Complete loading of the code segment, dispatching on its kind:
XIP delegates straight to the base class; user-local code defers to
DMemModelCodeSegMemory::Loaded(); kernel DLLs and user-global code get cache
maintenance, loader unmapping, and read-only protection here.

@param aInfo Loader-supplied creation info.
@return KErrNone on success, otherwise a system-wide error code.
*/
TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if(iXIP)
		return DEpocCodeSeg::Loaded(aInfo);

	TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
	if(user_local)
		{
		TInt r = Memory()->Loaded(aInfo);
		if(r!=KErrNone)
			return r;
		}
	else if((kernel && iExeCodeSeg!=this) || user_global)
		{
		// user-global or kernel code...
		SRamCodeInfo& ri = RamInfo();
		// make the newly written code visible to the instruction cache
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(ri.iCodeLoadAddr, ri.iCodeSize);
		LOCK_USER_MEMORY();
		MM::MappingDestroy(iCodeLoadMapping);
		// adjust iDataLoadAddr to point to address contents for initial data section
		// in running process...
		if(ri.iDataLoadAddr)
			ri.iDataLoadAddr = ri.iCodeRunAddr+ri.iCodeSize;

		// Mark the code memory object read only to prevent malicious code modifying it.
		TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
		__ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));
		}
	return DEpocCodeSeg::Loaded(aInfo);
	}
       
   453 
       
   454 
       
/**
Copy this code segment's export directory (plus the word preceding it —
presumably the export count; see the matching copy in
DMemModelCodeSegMemory::Loaded) into a loader-side buffer.

For user-local code the kernel-held copy (iCopyOfExportDir) is used; for
kernel code the directory is read directly. No-op for XIP code.

@param aDest User-side destination buffer, written with kumemput.
*/
void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));

	if(!iXIP)
		{
		// This is not XIP code so the loader can't access the export directory. 
		if (Memory()->iCopyOfExportDir)
			{// This must be local user side code.
			__NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == 0);
			// Copy the kernel's copy of the export directory for this code seg to the loader's buffer.
			SRamCodeInfo& ri = RamInfo();
			TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
			kumemput(aDest, Memory()->iCopyOfExportDir, size);
			}
		else
			{// This must be kernel side code.
			__NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel);
			// Copy the export directory for this code seg to the loader's buffer.
			SRamCodeInfo& ri = RamInfo();
			TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
			TAny* expDirLoad = (TAny*)(ri.iExportDir - sizeof(TLinAddr));
			kumemput(aDest, expDirLoad, size);
			}
		}
	}
       
   481 
       
   482 
       
// Opening a code segment for a process uses the same compatibility rules
// as finding one.
TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
	{
	return FindCheck(aProcess);
	}
       
   487 
       
   488 
       
   489 TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
       
   490 	{
       
   491 	__KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
       
   492 	if (aProcess)
       
   493 		{
       
   494 		DMemModelProcess& p=*(DMemModelProcess*)aProcess;
       
   495 		DCodeSeg* pPSeg=p.CodeSeg();
       
   496 		if (iAttachProcess && iAttachProcess!=aProcess)
       
   497 			return EFalse;
       
   498 		if (iExeCodeSeg && iExeCodeSeg!=pPSeg)
       
   499 			return EFalse;
       
   500 		}
       
   501 	return ETrue;
       
   502 	}
       
   503 
       
   504 
       
   505 void DMemModelCodeSeg::BTracePrime(TInt aCategory)
       
   506 	{
       
   507 	DCodeSeg::BTracePrime(aCategory);
       
   508 
       
   509 #ifdef BTRACE_FLEXIBLE_MEM_MODEL
       
   510 	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
       
   511 		{
       
   512 		// code seg mutex is held here, so memory objects cannot be destroyed
       
   513 		DMemModelCodeSegMemory* codeSegMemory = Memory();
       
   514 		if (codeSegMemory)
       
   515 			{
       
   516 			if (codeSegMemory->iCodeMemoryObject)
       
   517 				{
       
   518 				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,Memory()->iCodeMemoryObject,this);
       
   519 				}
       
   520 			}
       
   521 		else
       
   522 			{
       
   523 			if (iCodeMemoryObject)
       
   524 				{
       
   525 				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
       
   526 				}
       
   527 			}
       
   528 		}
       
   529 #endif	
       
   530 	}
       
   531 
       
   532 
       
   533 //
       
   534 // TPagedCodeInfo
       
   535 //
       
   536 
       
// Free the fixup and page-offset tables. iImportFixupTable is not freed
// separately because it points into the same allocation as iCodeRelocTable
// (see ReadFixupTables).
TPagedCodeInfo::~TPagedCodeInfo()
	{
	Kern::Free(iCodeRelocTable);
	Kern::Free(iCodePageOffsets);
	}
       
   542 
       
   543 
       
/**
Read and validate the file block map for a demand-paged code segment from
the loader (user side): compression info, per-page file offsets (for
byte-pair compression), and the block map describing where the code lives
on its local drive.

@param aInfo Loader-supplied creation info, containing user-side pointers.
@return KErrNone on success; KErrArgument/KErrCorrupt/KErrNotSupported for
		invalid or unusable input; KErrNoMemory on allocation failure.
*/
TInt TPagedCodeInfo::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
	{
	if(aInfo.iCodeBlockMapEntriesSize <= 0)
		return KErrArgument;  // no block map provided

	// get compression data...
	iCompressionType = aInfo.iCompressionType;
	switch(iCompressionType)
		{
	case KFormatNotCompressed:
		__ASSERT_COMPILE(KFormatNotCompressed==0); // Decompress() assumes this
		break;

	case KUidCompressionBytePair:
		{
		if(!aInfo.iCodePageOffsets)
			return KErrArgument;

		TInt pageCount = MM::RoundToPageCount(aInfo.iCodeSize);

		// one file offset per page, plus one for the end of the last page
		TInt size = sizeof(TInt32) * (pageCount + 1);
		iCodePageOffsets = (TInt32*)Kern::Alloc(size);
		if(!iCodePageOffsets)
			return KErrNoMemory;
		kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);

#ifdef __DUMP_BLOCKMAP_INFO
		Kern::Printf("CodePageOffsets:");
		for (TInt i = 0 ; i < pageCount + 1 ; ++i)
			Kern::Printf("  %08x", iCodePageOffsets[i]);
#endif

		// offsets must be non-decreasing and within the code's file extent
		TInt last = 0;
		for(TInt j=0; j<pageCount+1; ++j)
			{
			if(iCodePageOffsets[j] < last ||
				iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
				{
				__NK_ASSERT_DEBUG(0);
				return KErrCorrupt;
				}
			last = iCodePageOffsets[j];
			}
		}
		break;

	default:
		return KErrNotSupported;
		}

	// Copy block map data itself...

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("Original block map");
	Kern::Printf("  block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
	Kern::Printf("  block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
	Kern::Printf("  start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
	Kern::Printf("  local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
	Kern::Printf("  entry size: %d", aInfo.iCodeBlockMapEntriesSize);
#endif

	// Find relevant paging device
	iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
	if(TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
		return KErrArgument;
		}

	DPagingDevice* device = CodePagingDevice(iCodeLocalDrive);
	if(!device)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
		return KErrNotSupported;
		}

	// Set code start offset
	iCodeStartInFile = aInfo.iCodeStartInFile;
	if(iCodeStartInFile < 0)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
		return KErrArgument;
		}

	// Allocate buffer for block map and copy from user-side
	TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
	if(!buffer)
		return KErrNoMemory;
	kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("  entries:");
	for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
		Kern::Printf("    %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
#endif

	// Initialise block map
	// NOTE(review): buffer is only freed here on failure — presumably
	// iBlockMap takes ownership on success; confirm against TBlockMap.
	TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
								  buffer,
								  aInfo.iCodeBlockMapEntriesSize,
								  device->iReadUnitShift,
								  iCodeStartInFile + aInfo.iCodeLengthInFile);
	if(r!=KErrNone)
		{
		Kern::Free(buffer);
		return r;
		}

#if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
	iBlockMap.Dump();
#endif

	iCodeSize = aInfo.iCodeSize;
	return KErrNone;
	}
       
   659 
       
   660 
       
/**
Read code relocation table and import fixup table from user side.

Both tables are copied into a single kernel allocation; iImportFixupTable
points into it immediately after the relocation table, so only
iCodeRelocTable is freed in the destructor.

@param aInfo Loader-supplied creation info holding user-side table pointers,
			 table sizes, and the code/data deltas used by ApplyFixups().
@return KErrNone, or KErrNoMemory if the buffer cannot be allocated.
*/
TInt TPagedCodeInfo::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
	{
	iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
	iImportFixupTableSize = aInfo.iImportFixupTableSize;
	iCodeDelta = aInfo.iCodeDelta;
	iDataDelta = aInfo.iDataDelta;

	// round sizes up to four-byte boundaries...
	TUint relocSize = (iCodeRelocTableSize + 3) & ~3;
	TUint fixupSize = (iImportFixupTableSize + 3) & ~3;

	// copy relocs and fixups...
	iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
	if (!iCodeRelocTable)
		return KErrNoMemory;
	iImportFixupTable = iCodeRelocTable + relocSize;
	kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
	kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);

	return KErrNone;
	}
       
   685 
       
   686 
       
/**
Apply relocations and import fixups to one freshly paged-in page of code.

Each table starts with an array of 32-bit offsets indexed by page, so
entries [iIndex] and [iIndex+1] bound this page's slice of the table.

@param aBuffer Address of the buffer holding the page's contents
			   (presumably a temporary buffer before mapping — confirm).
@param iIndex  Index of the page within the code segment (note: parameter
			   is misnamed with the member 'i' prefix).
*/
void TPagedCodeInfo::ApplyFixups(TLinAddr aBuffer, TUint iIndex)
	{
//	START_PAGING_BENCHMARK;
	
	// relocate code...
	if(iCodeRelocTableSize)
		{
		TUint8* codeRelocTable = iCodeRelocTable;
		// per-page offset array at the start of the table bounds this
		// page's entries...
		TUint startOffset = ((TUint32*)codeRelocTable)[iIndex];
		TUint endOffset = ((TUint32*)codeRelocTable)[iIndex+1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iCodeRelocTableSize, K::Fault(K::ECodeSegBadFixupTables));

		const TUint32 codeDelta = iCodeDelta;
		const TUint32 dataDelta = iDataDelta;

		// each 16-bit entry: top 4 bits = reloc type, low 12 bits = offset
		// of the word within the page
		const TUint16* ptr = (const TUint16*)(codeRelocTable + startOffset);
		const TUint16* end = (const TUint16*)(codeRelocTable + endOffset);
		while(ptr<end)
			{
			TUint16 entry = *ptr++;
			TUint32* addr = (TUint32*)(aBuffer+(entry&0x0fff));
			TUint32 word = *addr;
#ifdef _DEBUG
			TInt type = entry&0xf000;
			__NK_ASSERT_DEBUG(type==KTextRelocType || type==KDataRelocType);
#endif
			// text relocations get the code delta, data relocations the
			// data delta
			if(entry<KDataRelocType)
				word += codeDelta;
			else
				word += dataDelta;
			*addr = word;
			}
		}

	// fixup imports...
	if(iImportFixupTableSize)
		{
		TUint8* importFixupTable = iImportFixupTable;
		// same per-page offset array layout as the relocation table
		TUint startOffset = ((TUint32*)importFixupTable)[iIndex];
		TUint endOffset = ((TUint32*)importFixupTable)[iIndex+1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iImportFixupTableSize, K::Fault(K::ECodeSegBadFixupTables));

		const TUint16* ptr = (const TUint16*)(importFixupTable + startOffset);
		const TUint16* end = (const TUint16*)(importFixupTable + endOffset);

		// each fixup: 16-bit page offset, then the 32-bit word to store,
		// split into low and high 16-bit halves
		while(ptr<end)
			{
			TUint16 offset = *ptr++;
			TUint32 wordLow = *ptr++;
			TUint32 wordHigh = *ptr++;
			TUint32 word = (wordHigh << 16) | wordLow;
//			__KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
			*(TUint32*)(aBuffer+offset) = word;
			}
		}
	
//	END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);
	}
       
   749 
       
   750