// Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// eka\include\kernel\cache_maintenance.h
// 
// Contains Kernel's internal API for cache maintenance 
//

/**
 @file
 @internalComponent
*/

#ifndef __CACHE_MAINTENANCE_H__
#define __CACHE_MAINTENANCE_H__

#include <e32err.h>
#include <nk_cpu.h>

#if defined(__CPU_HAS_CACHE)
#include <platform.h>
#include <mmboot.h>

/*
 * Specifies the number of different cache types/levels in InternalCache class.
 */
#if defined(__CPU_ARMV7)
const TInt KNumCacheInfos=3;	// ICache, DCache_PoC & DCache_PoU
#else // defined(__CPU_ARMV7)
const TInt KNumCacheInfos=2;	// ICache & DCache
#endif // else defined(__CPU_ARMV7)

const TInt KCacheInfoI=0;		// InternalCache info for ICache. On ARMv7, this applies to the point-of-unification.
const TInt KCacheInfoD=1;		// InternalCache info for DCache. On ARMv7, this applies to the point-of-coherency.
const TInt KCacheInfoD_PoU=2;	// InternalCache info for ARMv7 DCache for the point-of-unification.

/* 
 * Cache info of a particular cache type or level.
 */
struct SCacheInfo
	{
	TUint32 iSize;					// Total size in cache lines
	TUint16 iAssoc;					// Associativity
	TUint16 iLineLength;			// Line length in bytes. For multilevel cache, this is the minimum length.
	TUint32 iInvalidateThreshold;	// Size threshold for line-by-line Invalidate (in cache lines)
	TUint32 iCleanThreshold;		// Size threshold for line-by-line Clean (in cache lines)
	TUint32 iCleanAndInvalidateThreshold;// Size threshold for line-by-line CleanAndInvalidate (in cache lines)
#if !defined(__CPU_ARMV7)
	TUint iCleanAndInvalidatePtr;	// CleanAndInvalidate pointer
	TUint iCleanAndInvalidateMask;	// Mask to wrap CleanAndInvalidate pointer
#endif
	TUint8 iLineLenLog2;			// log2(iLineLength)
	TUint8 iPreemptBlock;			// Number of cache lines to clean before checking for system lock contention
	inline TUint InvalidateThresholdBytes()
		{ return iInvalidateThreshold<<iLineLenLog2; }
	inline TUint CleanThresholdBytes()
		{ return iCleanThreshold<<iLineLenLog2; }
	inline TUint CleanAndInvalidateThresholdBytes()
		{ return iCleanAndInvalidateThreshold<<iLineLenLog2; }
	inline TUint InvalidateThresholdPages()
		{ return iInvalidateThreshold >> (KPageShift-iLineLenLog2);}
	};
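
/*
 * Illustrative sketch (not part of the kernel API): how the thresholds above are
 * typically used to choose between line-by-line maintenance and maintenance of the
 * whole cache. Access to InternalCache is assumed to come from a friend class such
 * as CacheMaintenance; the real decision logic lives in the implementation files.
 *
 *   SCacheInfo& info = InternalCache::Info[KCacheInfoD];
 *   if (aSize >= info.CleanThresholdBytes())
 *       InternalCache::Clean_DCache_All();                   // cheaper to clean everything
 *   else
 *       InternalCache::Clean_DCache_Region(aBase, aSize);    // maintain only the region
 */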
       

/*
 * A set of static utility functions for internal (MMU controlled) cache memory.
 * Unless otherwise specified, the following is assumed:
 *  - All DCache maintenance primitives apply to the Point of Coherency.
 *  - All ICache maintenance primitives apply to the Point of Unification.
 *  - Multilevel caches are maintained either:
 * 			- starting from the level closest to CPU, or
 * 			- all levels are maintained simultaneously. 
 */
class InternalCache
	{
	friend class CacheMaintenance;
	friend class Cache;

public:
/*
 * Initializes internal data structures for different cache types/levels.
 * Must be called during Init1 boot phase.
 * @pre 	All internal cache memory is already configured and switched on in bootstrap.
 * 			Single thread environment is assumed (e.g. during boot time).
 */
	static void Init1();
/*
 * @return MMU's cache type register.
 */
	static TUint32 TypeRegister();
	
/*
 * @return	Internal and external cache attributes (orred TMappingAttributes enums)
 * 			that match aType memory type.
 * @panic	If aType 4-7 is specified on a platform without __CPU_MEMORY_TYPE_REMAPPING.
 */
	static TUint32 TypeToCachingAttributes(TMemoryType aType);

#if defined(__CPU_ARMV7)

/*
 * @return MMU's cache level ID register.
 */
	static TUint32 LevelIDRegister();

/*
 * @return MMU's cache size ID register for the given cache type & cache level.
 * @arg aType 0 for data or unified cache, 1 for instruction cache.
 * @arg aLevel 0-7 where 0 indicates the closest level to CPU.
 */
	static TUint32 SizeIdRegister(TUint32 aType, TUint32 aLevel);

#endif //defined(__CPU_ARMV7)

#if !defined(__MEMMODEL_MOVING__)
// Moving memory model is aware of the cache implementation on ARMv5 and makes some direct calls to
// InternalCache class.
private:
#endif


/*
 * Invalidates a memory region from cache(s) on all the cores. If ICache is specified in aMask,
 * it also drains branch predictors and instruction pipelines (ISB barrier).
 * If aSize is bigger than the invalidate threshold of any specified cache, it may clean
 * and invalidate the entire cache. 
 * @arg See Clean method for details.
 * 
 * @note CacheMaintenance assumes that on H/W where NOT (defined(__CPU_ARMV7) && defined(__SMP__)),
 * this will clean and invalidate the entire DCache if the invalidate threshold is reached.
 */
	static void Invalidate(TUint aMask, TLinAddr aBase, TUint aSize);

/*
 * Drains the buffers in cache memory. On ARMv6 onwards, this operation is known as DSB (Data 
 * Synchronisation Barrier).
 * On SMP, only the buffers of the running core are drained. 
 */
	static void DrainBuffers();

/*
 * Holds thresholds, cache line size, etc. for different types/levels of cache.
 */
	static SCacheInfo Info[KNumCacheInfos];

private:
#if defined(__BROADCAST_CACHE_MAINTENANCE__)

//	__BROADCAST_CACHE_MAINTENANCE__ is specified when cache maintenance has to be broadcast
//	across all cores on SMP platforms by software.
//	This is only defined on arm11 SMP H/W, as it has no H/W broadcast of cache maintenance.
//	CORTEX_A9 SMP has H/W broadcasting of line-by-line maintenance, while index/way maintenance is not used.

/*
 * Cleans a memory region from cache(s) & drains write buffers (DSB barrier)
 * on the core that executes the call.
 * @arg See Clean method for other details.
 */
	static void LocalClean(TUint aMask, TLinAddr aBase, TUint aSize);

/*
 * Invalidates a memory region from cache(s) on the core that executes the call.
 * @arg See Invalidate method for other details.
 */
	static void LocalInvalidate(TUint aMask, TLinAddr aBase, TUint aSize);

/*
 * Cleans and invalidates a memory region from cache(s) & drains write buffers (DSB barrier)
 * on the core that executes the call.
 * @arg See CleanAndInvalidate method for details.
 */
	static void LocalCleanAndInvalidate(TUint aMask, TLinAddr aBase, TUint aSize);

#endif //defined(__BROADCAST_CACHE_MAINTENANCE__)

/*
 * Cleans a memory region from cache(s) & drains write buffers (DSB barrier) on all the cores.
 * If aSize is bigger than the clean threshold of any specified cache, it may clean the entire cache.
 * @arg aMask	Specifies which caches to clean by orring KCacheSelectI (for ICache) and
 * 				KCacheSelectD (for DCache or unified cache).
 * @arg aBase	Linear address of the start of the region to clean.
 * @arg aSize	Size of the region in bytes.
 */
	static void Clean(TUint aMask, TLinAddr aBase, TUint aSize);

#if defined(__CPU_ARMV7)
/*
 * Cleans a memory region from DCache to the Point of Unification & drains write buffers (DSB barrier)
 * on all the cores. If aSize is bigger than the clean-to-the-point-of-unification threshold, it
 * may clean the entire cache(s) to the point-of-unification.
 * @arg See Clean method for details.
 */
	static void CleanPoU(TLinAddr aBase, TUint aSize);
#endif	// defined(__CPU_ARMV7)

/*
 * Invalidates a memory region from data and unified cache(s) on all the cores. It either:
 * 		- starts from the level which is furthest from the CPU, or
 * 		- invalidates all levels at once.
 * If aSize is bigger than the invalidate threshold of any specified cache, it may clean
 * and invalidate the entire cache.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void Invalidate_DCache_Reverse(TLinAddr aBase, TUint aSize);
	
/*
 * Cleans and invalidates a memory region from cache(s) & drains write buffers (DSB barrier) on
 * all the cores. 
 * If ICache is specified in aMask, it drains branch predictor and instruction pipeline (ISB barrier).
 * If aSize is bigger than the CleanAndInvalidate threshold of any specified cache, it may clean and
 * invalidate the entire cache(s).
 * @arg See Clean method for details.
 */
	static void CleanAndInvalidate(TUint aMask, TLinAddr aBase, TUint aSize);
	
/*
 * Invalidates a region of memory in instruction cache and drains branch predictor and
 * instruction pipeline (ISB barrier).
 * On SMP arm11mpcore, only the running core is maintained.
 * On SMP ARMv7 onwards, this maintains all the cores. However, the ISB barrier applies only
 * to the running core. The caller must ensure ISB is broadcast by other means.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void Invalidate_ICache_Region(TLinAddr aBase, TUint aSize);

/*
 * Invalidates the entire content of instruction cache(s) and drains branch predictor and
 * instruction pipeline (ISB barrier).
 * On SMP arm11mpcore, only the running core is maintained. 
 * On SMP ARMv7 onwards, this maintains all the cores. However, the ISB barrier applies only
 * to the running core. The caller must ensure ISB is broadcast by other means.
 */ 
	static void Invalidate_ICache_All();

/*
 * Invalidates a region of memory in data and unified cache(s).
 * On SMP arm11mpcore, only the running core is maintained. 
 * On SMP ARMv7 onwards, this maintains all the cores.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void Invalidate_DCache_Region(TLinAddr aBase, TUint aSize);

/*
 * Cleans a region of memory in data and unified cache(s) and drains write buffers (DSB barrier).
 * On SMP arm11mpcore, only the running core is maintained. 
 * On SMP ARMv7 onwards, this maintains all the cores.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	The size of the region in bytes.
 */
	static void Clean_DCache_Region(TLinAddr aBase, TUint aSize);

#if defined(__CPU_ARMV7)
/*
 * Cleans a region of memory in data and unified cache(s) to the point-of-unification and drains
 * write buffers (DSB barrier).
 * On SMP, it maintains all the cores.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void Clean_PoU_DCache_Region(TLinAddr aBase, TUint aSize);
#endif  //defined(__CPU_ARMV7)
	
/*
 * Cleans the entire content of data and unified caches and drains write buffers (DSB barrier).
 * On SMP, only the running core is maintained. 
 */
	static void Clean_DCache_All();

#if defined(__CPU_ARMV7)
/*
 * Cleans the entire content of data and unified cache(s) to the point-of-unification and drains
 * write buffers (DSB barrier).
 * On SMP, only the running core is maintained. 
 */
	static void Clean_PoU_DCache_All();
#endif //defined(__CPU_ARMV7)
	
/*
 * Cleans and invalidates a region of memory in data and unified cache(s) and drains
 * write buffers (DSB barrier).
 * On SMP arm11mpcore, only the running core is maintained. 
 * On SMP ARMv7 onwards, this maintains all the cores.
 */
	static void CleanAndInvalidate_DCache_Region(TLinAddr aBase, TUint aSize);

/*
 * Cleans and invalidates the entire content of data and unified cache(s) and drains
 * write buffers (DSB barrier).
 * On SMP, only the running core is maintained. 
 */
	static void CleanAndInvalidate_DCache_All();

/*
 * Synchronises a single line of cache(s) for instruction execution.
 * On SMP, only the running core is maintained.
 * 
 * @arg aAddr Virtual address that belongs to the cache line.
 * 
 * NOTE: On SMP this is guaranteed NOT to broadcast to other cores.
 * NOTE: It assumes the same line size for ICache and DCache.
 */
	static void IMB_CacheLine(TLinAddr aAddr);

private:
#if !defined(__CPU_ARMV7)
/* 
 * A primitive that parses the content of the cache type MMU register.
 */
	static void ParseCacheSizeInfo(TUint32 aValue, SCacheInfo& aInfo);
#endif
	
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
/* 
 * @return The content of the Primary Region Remap Register.
 */
	static TUint32 PrimaryRegionRemapRegister();

/*
 * @return The content of the Normal Memory Remap Register.
 */
	static TUint32 NormalMemoryRemapRegister();
#endif // defined(__CPU_MEMORY_TYPE_REMAPPING)
	
#if defined(__CPU_ARMV7)
	static TInt DmaBufferAlignementLog2;	// Holds the alignment requirement for DMA buffers. 
#endif
	};

#ifdef __HAS_EXTERNAL_CACHE__
// ARM External Cache register offsets
const TUint ARML2C_AuxiliaryControl = 0x104;
	const TUint ARML2C_WaySize_Mask = 0xe0000;
	const TUint ARML2C_WaySize_Shift = 17;
#if defined (__ARM_PL310_CACHE__)
	const TUint ARML2C_Assoc_Mask = 0x10000;
#else
	const TUint ARML2C_Assoc_Mask = 0x1e000;
	const TUint ARML2C_Assoc_Shift = 13;
#endif
	
const TUint ARML2C_CacheSync = 0x730;

const TUint ARML2C_InvalidateLineByPA = 0x770;
const TUint ARML2C_CleanLineByPA = 0x7b0;
const TUint ARML2C_CleanInvalidateLineByPA = 0x7f0;

const TUint ARML2C_CleanByIndexWay = 0x7b8;
const TUint ARML2C_CleanInvalidateByIndexWay = 0x7f8;

const TUint ARML2C_CleanByWay = 0x7bc;
const TUint ARML2C_InvalidateByWay = 0x77c;
const TUint ARML2C_CleanInvalidateByWay = 0x7fc;

/*
 * A set of static utility functions for external cache memory.
 * The following external cache controllers are supported:
 * 	- L210
 *  - L220
 *  - PL310
 */
class ExternalCache
	{
	friend class CacheMaintenance;
	friend class Cache;

public:
/*
 * Initializes internal cache infos. Must be called during Init1 boot phase.
 * @pre 	External cache controller is already configured and started in bootstrap.
 * 			Single thread environment is assumed (e.g. during boot time).
 */
	static void Init1();
private:
/*
 * Cleans a region of memory in cache and drains its buffers.
 * If aSize is bigger than the clean threshold, it may clean the entire cache.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void Clean(TLinAddr aBase, TUint aSize);
/*
 * Invalidates a region of memory in cache.
 * If aSize is bigger than the invalidate threshold, it may clean and invalidate the entire cache.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void Invalidate(TLinAddr aBase, TUint aSize);

/*
 * Cleans and invalidates a region of memory in cache and drains its buffers.
 * If aSize is bigger than the clean-and-invalidate threshold, it may clean and invalidate the entire cache.
 * @arg aBase	Linear address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void CleanAndInvalidate(TLinAddr aBase, TUint aSize);

/*
 * Cleans a region of contiguous physical memory in cache and drains its buffers.
 * It doesn't check the clean threshold.
 * @arg aAddr	Physical address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void CleanPhysicalMemory(TPhysAddr aAddr, TUint aSize);

/*
 * Invalidates a region of contiguous physical memory in cache.
 * It doesn't check the invalidate threshold.
 * @arg aAddr	Physical address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void InvalidatePhysicalMemory(TPhysAddr aAddr, TUint aSize);
	
/*
 * Cleans and invalidates a region of contiguous physical memory in cache and drains its buffers.
 * It doesn't check the clean-and-invalidate threshold.
 * @arg aAddr	Physical address of the start of the region.
 * @arg aSize	Size of the region in bytes.
 */
	static void CleanAndInvalidatePhysicalMemory(TPhysAddr aAddr, TUint aSize);

/*
 * Ensures the entire content of cache is copied back to main memory.
 * On some platforms, it may not invalidate cache content.
 * @pre Interrupts are disabled.
 */
	static void AtomicSync();

private:

/*
 * Generic function that cleans and/or invalidates a memory region.
 * @arg aBase		Linear address of the start of the region.
 * @arg aSize		Size of the region in bytes.
 * @param aCtrlReg	The address of the register to access in order to trigger the maintenance
 * 					operation. The following values are valid:
 * 						- to invalidate the region:
 *								ExternalControllerBaseAddress+ARML2C_InvalidateLineByPA
 *						- to clean the region:
 *								ExternalControllerBaseAddress+ARML2C_CleanLineByPA
 * 						- to clean and invalidate the region:
 *								ExternalControllerBaseAddress+ARML2C_CleanInvalidateLineByPA
 */
	static void Maintain_Region(TLinAddr aBase, TUint aSize, TInt* aCtrlReg);
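
/*
 * Illustrative sketch (an assumption about the implementation, not a statement of it):
 * the region primitives above can forward to Maintain_Region with one of the documented
 * control register addresses, where Base (declared below) is the controller's base address:
 *
 *   Maintain_Region(aBase, aSize, (TInt*)(Base + ARML2C_CleanLineByPA));            // Clean
 *   Maintain_Region(aBase, aSize, (TInt*)(Base + ARML2C_CleanInvalidateLineByPA));  // CleanAndInvalidate
 */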
       

/*
 * Generic function that cleans or cleans-and-invalidates the entire content of cache.
 * @param aCtrlReg	The address of the register to access in order to trigger the maintenance
 * 					operation. The following values are valid:
 *						- to clean:
 *								ExternalControllerBaseAddress+ARML2C_CleanByIndexWay
 * 						- to clean and invalidate:
 *								ExternalControllerBaseAddress+ARML2C_CleanInvalidateByWay
 */
	static void Maintain_All(TInt* aCtrlReg);

/*
 * Drains all the buffers in the cache controller.
 */
	static void DrainBuffers();
	
#if defined(__ARM_PL310_CACHE__)
	static TInt Lock();
	static void FlashLock(TInt aIrq);
	static void Unlock(TInt iIrq);
	const static TUint KMaxCacheLinesPerSpinLock = 10;	// Max number of cache lines to maintain while the spin lock is held.
	static TSpinLock iLock;
#endif //defined(__ARM_PL310_CACHE__)
	
	static TLinAddr Base;	// Base address of the external cache controller.
	static SCacheInfo Info;
	};
#endif //#ifdef __HAS_EXTERNAL_CACHE__


/*
 * Collector class of cache memory maintenance primitives.
 * They do not maintain TLBs, branch predictor nor CPU pipeline unless specified otherwise.
 * No preconditions are assumed unless specified otherwise.
 * @internalComponent
 */
class CacheMaintenance
	{
public:
/*
 * Initializes internal structures of cache configuration. Must be called during Init1 boot phase.
 *
 * @pre Single thread environment is assumed (e.g. during boot time).
 */
	static void Init1();

/*
 * Maintains cache(s) for a single page of physical memory that is about to change
 * its mapping/caching attributes. Note that the content of the memory may be lost.
 * 
 * The client may call this method either:
 * 	- during the process of invalidating old mapping(s), or
 *  - as background maintenance of free physical memory, or
 * 	- when the physical memory is about to be reused again.
 * 
 * Either this method or PageToPreserveAndReuse should be called for every page that was mapped
 * as normal memory. To check whether a memory type is normal, use CacheMaintenance::IsNormal.
 * 
 * The old mapping(s) should be removed before calling this method to ensure
 * no accidental/speculative access occurs afterwards, as it would negate the effect of this
 * procedure on cache memory.
 * 
 * Since a linear address is required for the aBase input parameter, the caller may need to apply
 * a temporary mapping. The memory type of the temporary mapping must be as specified
 * by CacheMaintenance::TemporaryMapping. In addition, the page colouring of the
 * old mapping(s) must apply to the temporary mapping.
 * 
 * @arg aBase				Linear address of the page.
 * @arg aOldType			Memory type of the old mapping.
 * @arg aPhysAddr			Physical address of the page or KPhysAddrInvalid. If known, the physical
 * 							address should always be specified (for performance reasons).
 * @arg aMask				Orred values of TPageToReuseMask enum:
 * 			EThresholdReached:
 * 							If set, the method will trigger the maintenance of entire cache(s) (as
 * 							opposed to maintenance of only the specified region). This will effectively
 * 							sort out cache maintenance for all free pages waiting for a PageToReuse call.
 * 							However, some cache levels may be unaffected by this global maintenance.
 * 							Therefore, the method still has to be called for all freed pages, but
 * 							those that follow should have EPageHasBeenPartiallySynced set in aMask.
 * 			EPageHasBeenPartiallySynced:
 *							Indicates that the page was in the queue for cache maintenance when the
 * 							maintenance of the whole cache(s) was triggered by the previous call
 * 							of this method with EThresholdReached in aMask.
 * 							If true, the method will sort out only those caches not affected by
 * 							the global cache maintenance.
 * 			EOldAndNewMappingMatch:
 * 							Indicates that the old and new caching attributes for the page are the same.
 * 							If true, the method may avoid unnecessary maintenance on some platforms.
 *
 * @return					True if the page has been removed from cache memory.
 * 							False if it wasn't, because aOldType doesn't require it, or
 * 							EOldAndNewMappingMatch is set on a H/W platform where it is safe not
 * 							to remove the page from cache if the mapping remains the same.
 */
	static TBool PageToReuse (TLinAddr aBase, TMemoryType aOldType, TPhysAddr aPhysAddr, TInt aMask=0);

/*
 * Indicates whether the number of pages waiting for PageToReuse maintenance is big enough to
 * trigger the maintenance of the entire cache(s) on particular levels. Use this method to decide
 * whether to set EThresholdReached in aMask when PageToReuse is called.
 * 
 * @arg aPageCount	Number of pages waiting in the queue for a CacheMaintenance::PageToReuse call. 
 * @return			True if aPageCount is big enough to trigger the maintenance of the entire cache(s).
 * 					In that case, the client may decide to call CacheMaintenance::PageToReuse with 
 * 					EThresholdReached in the aMask argument.
 * 
 * Note:			H/W platforms which are not able to maintain the entire cache always return EFalse.
 */
	static TBool IsPageToReuseThresholdReached(TUint aPageCount);

/*
 * Specifies additional arguments in aMask when CacheMaintenance::PageToReuse is called.
 */	
	enum TPageToReuseMask
		{
		/*
		 * Indicates that the call of PageToReuse maintenance must trigger the maintenance
		 * of entire cache(s) on particular level(s). The client should set
		 * this only if CacheMaintenance::IsPageToReuseThresholdReached returns ETrue.
		 */
		EThresholdReached = 1,
		/*
		 * Indicates that the page was in the queue for a CacheMaintenance::PageToReuse
		 * call when the maintenance of the whole cache(s) was triggered by the previous
		 * call of CacheMaintenance::PageToReuse with EThresholdReached set in aMask.
		 */
		EPageHasBeenPartiallySynced = 2,
		/* 
		 * Indicates that the old and new caching attributes for the page are the same.
		 */ 
		EOldAndNewMappingMatch = 4,
		};
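
/*
 * Illustrative sketch of how the flags above combine when a batch of freed pages is
 * processed. MapTemporary/UnmapTemporary, pagePhysAddr, pageCount and oldType are
 * hypothetical names used only for the example; the real page lists and temporary
 * mapping helpers live in the memory manager.
 *
 *   TInt mask = 0;
 *   if (CacheMaintenance::IsPageToReuseThresholdReached(pageCount))
 *       mask |= CacheMaintenance::EThresholdReached;    // first call may flush whole cache(s)
 *   for (TUint i = 0; i < pageCount; i++)
 *       {
 *       TLinAddr tempAddr = MapTemporary(pagePhysAddr[i], CacheMaintenance::TemporaryMapping());
 *       CacheMaintenance::PageToReuse(tempAddr, oldType, pagePhysAddr[i], mask);
 *       UnmapTemporary(tempAddr);
 *       if (mask & CacheMaintenance::EThresholdReached)
 *           mask = CacheMaintenance::EPageHasBeenPartiallySynced; // whole-cache flush already done
 *       }
 */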
       
	
/*
 * Preserves the content and maintains cache(s) for a single page of physical memory that
 * is about to change its mapping or caching attributes.
 * 
 * The client may call this method either:
 * 	- during the process of invalidating old mapping(s), or
 *  - as background maintenance of free physical memory, or
 * 	- when the physical memory is about to be reused again.
 * 
 * Either PageToReuse or this method should be called for every page that was mapped as normal
 * memory. To check whether a memory type is normal, use CacheMaintenance::IsNormal.
 *
 * The old mapping(s) should be removed before calling this method to ensure
 * no accidental/speculative access occurs afterwards, as it would negate the effect of this
 * procedure on cache memory.
 * 
 * Since a linear address is required for the aBase input parameter, the caller may need to apply
 * a temporary mapping. The memory type of the temporary mapping must be as specified
 * by CacheMaintenance::TemporaryMapping. In addition, the page colouring of the
 * old mapping(s) must apply to the temporary mapping.
 * 
 * @arg aBase				Linear address of the page.
 * @arg aOldType			Memory type of the old mapping.
 * @arg aPhysAddr			Physical address of the page or KPhysAddrInvalid.
 */
	static void PageToPreserveAndReuse(TLinAddr aBase, TMemoryType aOldType, TPhysAddr aPhysAddr);

/*
 * @return	Memory type for the temporary mapping for a physical page when PageToReuse or
 * 			PageToPreserveAndReuse is called.
 */
	static TMemoryType TemporaryMapping();
	
/*
 * Specifies how the source code has been changed when CodeChanged is called.
 */	
	enum TCodeChangedBy
		{
		/*
		 * The content of executable memory is overwritten through a cached mapping.
		 */
		ECPUThroughCache,		
		/*
		 * The content of executable memory is overwritten through an uncached mapping.
		 */
		ECPUUncached,
		/*
		 * The executable memory region is remapped.
		 */   
		EMemoryRemap,
		/*
		 * Code is changed by the code modifier. It is assumed that:
		 *  - the range of modified code is within a single cache line,
		 * 	- the code modifier has its own way to broadcast primitives; therefore, any cache
		 * 	  maintenance caused by this call will NOT be broadcast by S/W.
		 */
		ECodeModifier
		};

/*
 * Maintains cache for newly loaded or changed code. It also ensures branch predictor & execution
 * pipeline are drained accordingly.
 * Call this method after the code has been changed and before it executes.
 * 
 * The method may generate a data abort exception if any part of the defined memory region is not valid.
 * 
 * @arg aBase 		Linear address of the start of the code. 
 * @arg aSize 		The size of the region (in bytes) whose code has been changed.
 * @arg aChangedBy	Specifies the way the source code has been changed. 
 */
	static void CodeChanged(TLinAddr aBase, TUint aSize, TCodeChangedBy aChangedBy = ECPUThroughCache);
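
/*
 * Illustrative sketch of the intended calling order (codeAddr, codeImage and codeSize
 * are hypothetical names; the copy step is shown with memcpy purely for illustration):
 *
 *   memcpy((TAny*)codeAddr, codeImage, codeSize);        // write the new instructions
 *   CacheMaintenance::CodeChanged(codeAddr, codeSize);   // default aChangedBy: ECPUThroughCache
 *   // only now is it safe to branch to codeAddr
 */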
       

/*
 * Ensures the changes in the specified memory region made by CPUs are visible to the
 * external agents/observers.
 *   
 * The method may generate a data abort exception if any part of the region is not valid.
 * 
 * @arg aBase 			Linear address of the start of the memory region.
 * @arg aSize 			The size of the region in bytes.
 * @arg aMapAttr		The attributes of the region (orred TMappingAttributes enum values).
 * @arg aPhysAddr		Physical address that corresponds to the aBase linear address. KPhysAddrInvalid if
 * 						unspecified. Specify this argument only if the region is contiguously mapped.
 * 
 * @pre 				As specified by MASK_THREAD_STANDARD mask. 
 */
	static void MakeCPUChangesVisible(TLinAddr aBase, TUint aSize, TUint32 aMapAttr, TPhysAddr aPhysAddr = KPhysAddrInvalid);

/*
 * Prepares a memory region for the external agents' write access.
 * It ensures that cache doesn't accidentally overwrite physical memory that the external agent is
 * about to write into. CPUs must not rely on the content of the region nor write into it. Once the
 * external writes are completed, CacheMaintenance::MakeExternalChangesVisible must be called.
 * 
 * Note that this will invalidate CPU writes in the region even if no external write occurs.
 * 
 * The method may generate a data abort exception if any part of the region is not valid.
 * 
 * @arg aBase 			Linear address of the start of the memory region.
 * @arg aSize 			The size of the region in bytes.
 * @arg aMapAttr		The attributes of the region (orred TMappingAttributes enum values).
 * @arg aPhysAddr		Physical address that corresponds to the aBase linear address. KPhysAddrInvalid if
 * 						unspecified. Specify this argument only if the region is contiguously mapped.
 * 
 * @pre 				As specified by MASK_THREAD_STANDARD mask. 
 */
	static void PrepareMemoryForExternalWrites(TLinAddr aBase, TUint aSize, TUint32 aMapAttr, TPhysAddr aPhysAddr = KPhysAddrInvalid);

/*
 * Ensures the changes in the specified memory region made by the external agent are visible to CPUs.
 * 
 * The method may generate a data abort exception if any part of the region is not valid.
 * 
 * @arg aBase 			Linear address of the start of the memory region.
 * @arg aSize 			The size of the region in bytes.
 * @arg aMapAttr		The attributes of the region (orred TMappingAttributes enum values).
 * @arg aPhysAddr		Physical address that corresponds to the aBase linear address. KPhysAddrInvalid if
 * 						unspecified. Specify this argument only if the region is contiguously mapped.
 */
	static void MakeExternalChangesVisible(TLinAddr aBase, TUint aSize, TUint32 aMapAttr, TPhysAddr aPhysAddr = KPhysAddrInvalid);
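
/*
 * Illustrative sketch of the calling pattern around an external (e.g. DMA) transfer.
 * StartDmaWrite/StartDmaRead and the buffer names are hypothetical, shown only to
 * clarify the ordering of the three methods above:
 *
 *   // CPU -> device: make CPU-written data visible before the device reads it.
 *   CacheMaintenance::MakeCPUChangesVisible(bufAddr, bufSize, bufMapAttr, bufPhysAddr);
 *   StartDmaWrite(bufPhysAddr, bufSize);
 *
 *   // Device -> CPU: protect the buffer before the transfer, then expose the result.
 *   CacheMaintenance::PrepareMemoryForExternalWrites(bufAddr, bufSize, bufMapAttr, bufPhysAddr);
 *   StartDmaRead(bufPhysAddr, bufSize);
 *   // ... wait for the transfer to complete ...
 *   CacheMaintenance::MakeExternalChangesVisible(bufAddr, bufSize, bufMapAttr, bufPhysAddr);
 */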
       

#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)
// The following methods maintain the cache on page table/directory changes.
// The moving memory model always maps page tables as write-through memory, so
// InternalCache::DrainBuffers is sufficient in that case.
	
/*
 * Ensures the change in a page table is visible to the MMU's Page-Table Walk.
 * The client should call this method when a single entry in a page table has been changed.
 * 
 * @arg aAddr 			Linear address of the page table entry that has been changed.
 * 
 * @see CACHE_MAINTENANCE_PDE_PTE_UPDATED is the alternative assembler macro for cia files. 
 */
	inline static void SinglePteUpdated(TLinAddr aAddr);

/*
 * Ensures the changes in a page table are visible to the MMU's Page-Table Walk.
 * The client should call this method when two or more consecutive entries in a page table
 * have been changed.
 * 
 * @arg aAddr 			Linear address of the first page table entry that has been changed.
 * @arg aSize 			The size of the region (in bytes) of the altered page table entries.
 */
	inline static void MultiplePtesUpdated(TLinAddr aAddr, TUint aSize);

/*
 * Ensures the change in a page directory is visible to the MMU's Page-Table Walk.
 * The client should call this method when a single entry in a page directory has been changed.
 * In case of page mapping, it should also ensure that the content of the page table pointed to by the new
 * value is either initialised or marked as invalid (no random values are allowed).
 * 
 * @arg aAddr 			Linear address of the page directory entry that has been changed.
 * 
 * @see CACHE_MAINTENANCE_PDE_PTE_UPDATED is the alternative assembler macro for cia files. 
 */	
	inline static void SinglePdeUpdated(TLinAddr aAddr);

/*
 * Ensures the changes in a page directory are visible to the MMU's Page-Table Walk.
 * The client should call this method when two or more consecutive entries in a page directory
 * have been changed.
 * In case of page mapping, it should also ensure that the content of the page table pointed to by the new
 * value is either initialised or marked as invalid (no random values are allowed).
 * 
 * @arg aPde 			Linear address of the first page directory entry that has been changed.
 * @arg aSize 			The size of the region (in bytes) of the altered page directory entries.
 */
	inline static void PdesInitialised(TLinAddr aPde, TUint aSize);
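
/*
 * Illustrative sketch (pageTable, index, first, count and newPte are hypothetical; the
 * TPte type is assumed from the MMU code, it is not defined in this header): after
 * updating page table entries, make the change visible to the MMU's page-table walk:
 *
 *   pageTable[index] = newPte;
 *   CacheMaintenance::SinglePteUpdated((TLinAddr)&pageTable[index]);
 *
 *   // or, for a run of consecutive entries:
 *   CacheMaintenance::MultiplePtesUpdated((TLinAddr)&pageTable[first], count*sizeof(TPte));
 */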
       

#endif //#if defined(__MEMMODEL_MULTIPLE__) || defined(__MEMMODEL_FLEXIBLE__)

/*
 * @arg	aType	Memory Type
 * @return 		False if memory type is guaranteed not to be normal memory.
 * 				True if memory type may be normal memory.
 * 
 * @note		Normal uncached memory is not held in cache but may use cache buffers.
 */
	static TBool IsNormal(TMemoryType aType);
		
/*
 * @arg	aType	Memory Type
 * @return 		False if memory type is guaranteed not to be cached at any level.
 * 				True if memory type may be cached at any level.
 */
	static TBool IsCached(TMemoryType aType);

#if defined(__MEMMODEL_MOVING__) || defined(__MEMMODEL_MULTIPLE__)

/*
 * Ensures the changes in the specified memory region made by CPUs are visible to the
 * external agents/observers. The region is also removed from the caches.
 * 
 * On the multiple memory model, the memory region should be unmapped from its original mapping and
 * a temporary mapping should be applied, as described in the PageToReuse & PageToPreserveAndReuse methods.
 * On the moving memory model, call this function before the unmapping occurs.
 *   
 * The method may generate a data abort exception if any part of the region is not valid.
 * 
 * @arg aLinAddr 		Linear address of the start of the memory region.
 * @arg aSize 			The size of the region in bytes.
 * @arg aMapAttr		The attributes of the region (orred TMappingAttributes enum values).
 */
	static void MemoryToPreserveAndReuse(TLinAddr aLinAddr, TUint aSize, TUint32 aMapAttr);

/*
 * Ensures the entire content of physical (VIPT & PIPT) data cache(s) is written down
 * to memory and the cache is emptied.
 */
	static void SyncPhysicalCache_All();

/*
 * @return 	Performance threshold for the SyncPhysicalCache_All method, in page count.
 * 			If the number of pages to be recommitted is bigger than the threshold,
 * 			the client may decide to use CacheMaintenance::SyncPhysicalCache_All
 * 			instead of CacheMaintenance::PageToReuse.
 */
	inline static TUint SyncAllPerformanceThresholdPages()
	{
#if defined(__ARM_PL310_CACHE__) && !defined(__ARM_PL310_ERRATUM_588369_FIXED)
	// Clean&Invalidate by Set/Way in PL310 is broken, so we cannot maintain the entire cache(s).
	// This ensures no cache threshold is ever reached, so all cache maintenance is performed on individual cache lines.
	return KMaxTUint;
#else
	return InternalCache::Info[KCacheInfoD].InvalidateThresholdPages();
#endif // #if defined(__ARM_PL310_CACHE__) && !defined(__ARM_PL310_ERRATUM_588369_FIXED)
	}
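
/*
 * Illustrative sketch (pageCount and the per-page loop are hypothetical): when a large
 * batch of pages is being decommitted, the whole-cache path may be cheaper than
 * per-page maintenance:
 *
 *   if (pageCount >= CacheMaintenance::SyncAllPerformanceThresholdPages())
 *       CacheMaintenance::SyncPhysicalCache_All();    // one global clean & empty of the data cache(s)
 *   else
 *       {
 *       // maintain each page individually via PageToReuse/PageToPreserveAndReuse
 *       }
 */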
       

#endif // #if defined(__MEMMODEL_MOVING__) || defined(__MEMMODEL_MULTIPLE__)

#if defined(__MEMMODEL_MOVING__)
//	Moving memory model is based on the ARMv5 architecture and requires virtual cache memory to be
//	flushed on process switch. For that reason, this memory model needs separate sets
//	of primitives for virtual (VIVT) and physical (VIPT or PIPT) cache.

/*
 * Performs any cache/memory synchronisation required prior to a change
 * in virtual to physical address mappings.
 * Enter and return with the system locked.
 */	
	static void OnProcessSwitch();
	
/*
 * Maintains virtual cache for a single page of physical memory that is about to change
 * its mapping/caching attributes. It is presumed the memory is fully cached.
 * @arg aBase				Linear address of the page.
 */	
	static void PageToReuseVirtualCache(TLinAddr aBase);

/*
 * Maintains virtual cache for a single page of physical memory that is about to change
 * its mapping/caching attributes. In addition, the content of physical memory is preserved.
 * It is presumed the memory is fully cached.
 * @arg aBase				Linear address of the page.
 */	
	static void PageToPreserveAndReuseVirtualCache(TLinAddr aBase);
	
/*
 * Maintains physical cache for a single page of physical memory that is about to change
 * its mapping/caching attributes. It is presumed the memory is fully cached.
 * @arg aPhysAddr			Physical address of the page.
 */	
	static void PageToReusePhysicalCache(TPhysAddr aPhysAddr);

#endif // defined(__MEMMODEL_MOVING__)

#if defined(__MEMMODEL_DIRECT__)
/*
 * Maintains cache(s) for a memory region that is about to change its mapping/caching attributes.
 * @arg aBase				Linear address of the start of the region.
 * @arg aSize				The size of the region in bytes.
 */
	static void MemoryToReuse (TLinAddr aBase, TUint aSize);
#endif //defined(__MEMMODEL_DIRECT__)

private:

#if defined (__CPU_OUTER_CACHE_IS_INTERNAL_CACHE)
/*
 * Combines inner and outer caching attributes.
 * 
 * @arg aMapAttr		On entry, holds inner and outer caching attributes (orred
 * 						TMappingAttributes enum values).
 * 						On exit, the inner caching attribute holds the combined inner and outer values,
 * 						while the outer caching attribute remains unchanged.
 * 
 * Note: 	On __CPU_CORTEX_A8__ both inner & outer caches are MMU controlled.
 */
	static void CombineCacheAttributes (TUint32& aMapAttr);
#endif
	};


#if defined(__SMP__) && !defined(__BROADCAST_CACHE_MAINTENANCE__)
// Platforms that rely on H/W broadcast of cache maintenance have to broadcast ISB by software. 
#define __BROADCAST_ISB

class T_ISB_IPI : public TGenericIPI
    {
public:
    T_ISB_IPI();
    static void ISBIsr(TGenericIPI*);
    void Do();
    };
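
/*
 * Illustrative sketch: where __BROADCAST_ISB is defined, a caller that needs the ISB to
 * reach every core (e.g. after Invalidate_ICache_Region, whose ISB only applies to the
 * running core) might, presumably, issue:
 *
 *   T_ISB_IPI ipi;
 *   ipi.Do();    // runs ISBIsr on the other cores via the generic IPI mechanism
 */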
       
#endif //defined(__SMP__) && !defined(__BROADCAST_CACHE_MAINTENANCE__)

#endif // defined(__CPU_HAS_CACHE)

#endif //#ifndef __CACHE_MAINTENANCE_H__