|
/*
 * Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
 * All rights reserved.
 * This component and the accompanying materials are made available
 * under the terms of the License "Eclipse Public License v1.0"
 * which accompanies this distribution, and is available
 * at the URL "http://www.eclipse.org/legal/epl-v10.html".
 *
 * Initial Contributors:
 * Nokia Corporation - initial contribution.
 *
 * Contributors:
 *
 * Description:
 *
 */
|
17 |
|
18 |
|
19 |
|
20 #include <platform.h> // For DPlatChunkHw |
|
21 #include <nk_priv.h> // For __ASSERT_NO_FAST_MUTEX (published to partners) |
|
22 |
|
23 #include "memmanager.h" // For MemManager |
|
24 #include "memmanagertrace.h" // For C_TRACE... |
|
25 |
|
// Singleton instance pointer; set once in DECLARE_STANDARD_EXTENSION().
DMemManager* DMemManager::iThisptr = NULL;
// DFC queue shared by the pool allocate and pool delete DFCs.
TDfcQue* DMemManager::iDfcQueue = NULL;
|
28 |
|
29 |
|
30 // Memory management fault enumerations |
|
// Memory management fault enumerations.
// These codes are OR'ed with ( EDMemmanagerTraceId << KClassIdentifierShift )
// and passed to ASSERT_RESET_ALWAYS when a fatal error is detected.
enum TMemFault
    {
    EMemBlockAllocationFailed = 0,      // No memory block could be allocated / returned
    EMemBlockSizeZero,                  // AllocBlock was called with size 0
    EExtensionMemoryAllocationFailed,   // DMemManager extension object allocation failed
    EDfcCreateFailed,                   // TDfc object allocation failed
    EDfcQueueCreateFailed,              // Kern::DfcQCreate failed to create the DFC queue
    EInvalidParameter,                  // Invalid pool construction parameter
    EPhysicalMemReleaseFailed,          // Epoc::FreePhysicalRam failed
    EMemoryAllocationFailed,            // Generic kernel heap allocation failed
    EHWMemAllocFailed,                  // DPlatChunkHw::New failed
    EPhysicalMemAllocFailed,            // Epoc::AllocPhysicalRam (or WINS heap alloc) failed
    EInvalidQueueCount,                 // Pool create/delete DFC ran with an empty queue
    EAllocNotThreadContext,             // AllocBlock called outside thread context
    EDeallocNotThreadContext,           // DeallocBlock called outside thread context
    EMemBlockInvalidReleaseDetected     // Allocated block handed out with non-zero length
    };
|
48 |
|
49 /* |
|
50 * Constructor. |
|
51 */ |
|
/*
 * Constructor.
 *
 * Creates the fast mutex guarding the pool containers, the DFC queue and
 * the pool allocate/delete DFCs, and populates the initial, statically
 * configured set of memory pools (block size / block count pairs).
 */
DMemManager::DMemManager()
    {

    C_TRACE( ( _T( "DMemManager::DMemManager>" ) ) );

    iFastMutex = new NFastMutex();
    ASSERT_RESET_ALWAYS( iFastMutex, ( EMemoryAllocationFailed | EDMemmanagerTraceId << KClassIdentifierShift ) );

    //Priority must be larger than the threads calling alloc and dealloc
    Kern::DfcQCreate( iDfcQueue, 28, &KMemManagerDfcQThreadName );
    ASSERT_RESET_ALWAYS( ( iDfcQueue ), ( EDfcQueueCreateFailed | EDMemmanagerTraceId << KClassIdentifierShift ) );

    // DFC that grows a pool when its high water mark is crossed.
    iPoolAllocateDfc = new TDfc( PoolAllocateDfc, this, iDfcQueue, KPoolAllocateDfcPriority );
    ASSERT_RESET_ALWAYS( ( iPoolAllocateDfc ), ( EDfcCreateFailed | EDMemmanagerTraceId << KClassIdentifierShift ) );

    // DFC that deletes a drained, superseded pool.
    iPoolDeleteDfc = new TDfc( PoolDeleteDfc, this, iDfcQueue , KPoolDeleteDfcPriority );
    ASSERT_RESET_ALWAYS( ( iPoolDeleteDfc ), ( EDfcCreateFailed | EDMemmanagerTraceId << KClassIdentifierShift ) );

    //Static array configuration: DMemPool( block size in bytes, number of blocks ).
    //Pools must stay ordered by ascending block size - AllocBlock picks the
    //first pool whose block size fits the request.
    // NOTE(review): the results of 'new DMemPool' and Append() are not
    // checked here; a NULL pool or failed append would surface later as a
    // crash in Alloc/Dealloc — confirm kernel OOM policy makes this safe.
    iMemPond.Append( new DMemPool( 16, 384 ) );
    iMemPond.Append( new DMemPool( 32, 128 ) );
    iMemPond.Append( new DMemPool( 64, 64 ) );
    iMemPond.Append( new DMemPool( 128, 80 ) );
    iMemPond.Append( new DMemPool( 256, 60 ) );
    iMemPond.Append( new DMemPool( 2048, 100 ) );
    iMemPond.Append( new DMemPool( 4096, 100 ) );
    iMemPond.Append( new DMemPool( 65524, 4 ) );

    C_TRACE( ( _T( "DMemManager::DMemManager<" ) ) );
    }
|
82 |
|
83 /* |
|
84 * Destructor. |
|
85 */ |
|
86 DMemManager::~DMemManager() |
|
87 { |
|
88 C_TRACE( ( _T( "DMemManager::~DMemManager>" ) ) ); |
|
89 |
|
90 for( int i = 0; i < iMemPond.Count(); i++ ) |
|
91 { |
|
92 delete iMemPond[i]; |
|
93 } |
|
94 |
|
95 iMemPond.Reset(); |
|
96 |
|
97 for( int i = 0; i < iPoolCreateQueue.Count(); i++ ) |
|
98 { |
|
99 delete iPoolCreateQueue[i]; |
|
100 } |
|
101 |
|
102 iPoolCreateQueue.Reset(); |
|
103 |
|
104 for( TInt i = 0; i < iPoolDeleteQueue.Count(); i++ ) |
|
105 { |
|
106 delete iPoolDeleteQueue[i]; |
|
107 } |
|
108 |
|
109 iPoolDeleteQueue.Reset(); |
|
110 |
|
111 delete iFastMutex; |
|
112 iFastMutex = NULL; |
|
113 |
|
114 C_TRACE( ( _T( "DMemManager::~DMemManager<" ) ) ); |
|
115 } |
|
116 |
|
117 /* |
|
118 * DFC for dynamic pool allocation. |
|
119 */ |
|
/*
 * DFC for dynamic pool allocation.
 *
 * Runs in the manager's DFC thread when AllocBlock detects that a pool
 * has crossed its high water mark. Marks the exhausted pool as a
 * "copy in use" (so it gets deleted once fully drained) and inserts a
 * fresh pool with the same configuration in front of it.
 */
void DMemManager::PoolAllocateDfc(
    TAny* aPtr // Pointer to this object.
    )
    {
    C_TRACE( ( _T( "DMemManager::PoolAllocateDfc aPtr 0x%x>" ), aPtr ) );

    DMemManager& tmp = *reinterpret_cast<DMemManager*>( aPtr );

    NKern::FMWait( tmp.iFastMutex );

    ASSERT_RESET_ALWAYS( ( tmp.iPoolCreateQueue.Count() > 0 ), ( EInvalidQueueCount | EDMemmanagerTraceId << KClassIdentifierShift ) );
    // NOTE(review): Find() result is used unchecked; if the queued pool had
    // already been removed from iMemPond, index would be KErrNotFound (-1)
    // and the following accesses would be out of bounds — verify lifecycle.
    TInt index = tmp.iMemPond.Find( tmp.iPoolCreateQueue[0] );
    // Flag the old pool so DMemPool::Free reports it deletable when drained.
    ( tmp.iMemPond[ index ] )->iCopyPoolInUse = ETrue;
    // Insert the replacement pool at the same position so lookup order by
    // block size is preserved.
    // NOTE(review): DMemPool construction allocates physical RAM / heap
    // while the fast mutex is held; blocking calls under a fast mutex are
    // normally forbidden in the Symbian kernel — confirm this is safe here.
    tmp.iMemPond.Insert( (new DMemPool( tmp.iPoolCreateQueue[0]->iBlockSize, tmp.iPoolCreateQueue[0]->iBlockNum ) ), index );
    tmp.iPoolCreateQueue.Remove(0);

    NKern::FMSignal( tmp.iFastMutex );

    C_TRACE( ( _T( "DMemManager::PoolAllocateDfc<" ) ) );
    }
|
140 |
|
141 /* |
|
142 * DFC for dynamic pool deletion. |
|
143 */ |
|
144 void DMemManager::PoolDeleteDfc( |
|
145 TAny* aPtr // Pointer to this object. |
|
146 ) |
|
147 { |
|
148 C_TRACE( ( _T( "DMemManager::PoolDeleteDfc aPtr 0x%x>" ), aPtr ) ); |
|
149 |
|
150 DMemManager& tmp = *reinterpret_cast<DMemManager*>( aPtr ); |
|
151 |
|
152 NKern::FMWait( tmp.iFastMutex ); |
|
153 |
|
154 ASSERT_RESET_ALWAYS( ( tmp.iPoolDeleteQueue.Count() > 0 ), ( EInvalidQueueCount | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
155 delete tmp.iPoolDeleteQueue[0]; |
|
156 tmp.iPoolDeleteQueue.Remove(0); |
|
157 |
|
158 NKern::FMSignal( tmp.iFastMutex ); |
|
159 |
|
160 C_TRACE( ( _T( "DMemManager::PoolDeleteDfc<" ) ) ); |
|
161 } |
|
162 |
|
163 |
|
164 /* |
|
165 * Constructor. |
|
166 */ |
|
167 DMemManager::DMemPool::DMemPool( const TUint16 aUnitSize, const TUint16 aUnitNum ) : |
|
168 iAllocatedMemBlock(NULL), iFreeMemBlock(NULL), iMemoryArea(NULL), |
|
169 iPoolSize( aUnitNum * ( aUnitSize + sizeof(struct sUnit) ) ), |
|
170 iBlockSize( aUnitSize ), iBlockNum( aUnitNum ), |
|
171 iBlockUsage(0), iHwChunk(NULL), iPhysicalAddress(0) |
|
172 { |
|
173 C_TRACE( ( _T( "DMemManager::DMemPool::DMemPool aUnitSize 0x%x, aUnitNum 0x%x>" ), aUnitSize, aUnitNum ) ); |
|
174 |
|
175 ASSERT_RESET_ALWAYS( ((aUnitSize || aUnitNum) != 0 ), ( EInvalidParameter | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
176 |
|
177 #ifndef __WINS__ |
|
178 TInt r = Epoc::AllocPhysicalRam( iPoolSize, iPhysicalAddress ); |
|
179 ASSERT_RESET_ALWAYS( ( r == KErrNone ), ( EPhysicalMemAllocFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
180 |
|
181 // Create chunk as: non accessible from user side and non-cached. |
|
182 |
|
183 r = DPlatChunkHw::New( iHwChunk, iPhysicalAddress, iPoolSize, EMapAttrSupRw | EMapAttrFullyBlocking ); |
|
184 ASSERT_RESET_ALWAYS( ( r == KErrNone ), ( EHWMemAllocFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
185 |
|
186 iMemoryArea = (TUint8*)(iHwChunk->LinearAddress()); |
|
187 ASSERT_RESET_ALWAYS( ( iMemoryArea ), ( EPhysicalMemAllocFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
188 |
|
189 #else |
|
190 iMemoryArea = (TUint8*)Kern::Alloc( iPoolSize ); |
|
191 //ASSERT_RESET_ALWAYS( ( r == KErrNone ), ( EPhysicalMemAllocFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
192 |
|
193 // Create chunk as: non accessible from user side and non-cached. |
|
194 |
|
195 //iMemoryArea = (TUint8*)(iPoolSize/*->LinearAddress()*/); |
|
196 ASSERT_RESET_ALWAYS( ( iMemoryArea ), ( EPhysicalMemAllocFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
197 #endif // __WINS__ |
|
198 |
|
199 for( TUint16 i = 0; i < aUnitNum; i++ ) //Link all mem unit . Create linked list. |
|
200 { |
|
201 struct sUnit *pCurUnit = (struct sUnit *)( iMemoryArea + i*(aUnitSize+sizeof(struct sUnit)) ); |
|
202 |
|
203 pCurUnit->iPrev = NULL; |
|
204 pCurUnit->iNext = iFreeMemBlock; //Insert the new unit at head. |
|
205 |
|
206 if(NULL != iFreeMemBlock) |
|
207 { |
|
208 iFreeMemBlock->iPrev = pCurUnit; |
|
209 } |
|
210 |
|
211 pCurUnit->iMemPtr = new TPtr8( ((TUint8*)pCurUnit + sizeof(struct sUnit)) , 0, iBlockSize ); |
|
212 |
|
213 iFreeMemBlock = pCurUnit; |
|
214 } |
|
215 |
|
216 iHighWaterMark = ( KPoolHighWaterLimit * aUnitNum ) / 100; |
|
217 |
|
218 C_TRACE( ( _T( "DMemManager::DMemPool::DMemPool<" ) ) ); |
|
219 } |
|
220 |
|
221 /* |
|
222 * Destructor. |
|
223 */ |
|
224 DMemManager::DMemPool::~DMemPool() |
|
225 { |
|
226 C_TRACE( ( _T( "DMemManager::DMemPool::~DMemPool>" ) ) ); |
|
227 |
|
228 for( TUint16 i = 0; i < iBlockNum; i++ ) |
|
229 { |
|
230 struct sUnit *pCurUnit = (struct sUnit *)( iMemoryArea + i*(iBlockSize+sizeof(struct sUnit)) ); |
|
231 |
|
232 if( pCurUnit->iMemPtr ) |
|
233 { |
|
234 delete pCurUnit->iMemPtr; |
|
235 pCurUnit->iMemPtr = NULL; |
|
236 } |
|
237 } |
|
238 #ifndef __WINS__ |
|
239 TInt r = Epoc::FreePhysicalRam( iPhysicalAddress, iPoolSize ); |
|
240 ASSERT_RESET_ALWAYS( ( r == KErrNone ), ( EPhysicalMemReleaseFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
241 |
|
242 iHwChunk->Close(NULL); |
|
243 |
|
244 iHwChunk = NULL; |
|
245 #else |
|
246 |
|
247 Kern::Free( iMemoryArea ); |
|
248 |
|
249 #endif // __WINS__ |
|
250 |
|
251 C_TRACE( ( _T( "DMemManager::DMemPool::~DMemPool<" ) ) ); |
|
252 } |
|
253 |
|
254 |
|
255 /* |
|
256 * Allocate memory unit. |
|
257 */ |
|
258 TPtr8* DMemManager::DMemPool::Alloc( const TUint16 aSize ) |
|
259 { |
|
260 C_TRACE( ( _T( "DMemManager::DMemPool::Alloc>" ) ) ); |
|
261 |
|
262 ASSERT_RESET_ALWAYS( (iMemoryArea || iFreeMemBlock), ( EMemBlockAllocationFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
263 |
|
264 struct sUnit *pCurUnit = iFreeMemBlock; |
|
265 |
|
266 iFreeMemBlock = pCurUnit->iNext; |
|
267 |
|
268 if( iFreeMemBlock ) |
|
269 { |
|
270 iFreeMemBlock->iPrev = NULL; |
|
271 } |
|
272 |
|
273 pCurUnit->iNext = iAllocatedMemBlock; |
|
274 |
|
275 if( iAllocatedMemBlock ) |
|
276 { |
|
277 iAllocatedMemBlock->iPrev = pCurUnit; |
|
278 } |
|
279 |
|
280 iAllocatedMemBlock = pCurUnit; |
|
281 |
|
282 iBlockUsage++; |
|
283 |
|
284 C_TRACE( ( _T( "DMemManager::DMemPool::Alloc<" ) ) ); |
|
285 return iAllocatedMemBlock->iMemPtr; |
|
286 } |
|
287 |
|
288 |
|
289 /* |
|
290 * Free memory unit. |
|
291 */ |
|
292 TBool DMemManager::DMemPool::Free( const TUint8* aBlockAddress ) |
|
293 { |
|
294 C_TRACE( ( _T( "DMemManager::DMemPool::Free>" ) ) ); |
|
295 |
|
296 struct sUnit *pCurUnit = (struct sUnit *)(aBlockAddress - sizeof(struct sUnit) ); |
|
297 |
|
298 iAllocatedMemBlock = pCurUnit->iNext; |
|
299 |
|
300 if(NULL != iAllocatedMemBlock) |
|
301 { |
|
302 iAllocatedMemBlock->iPrev = NULL; |
|
303 } |
|
304 |
|
305 pCurUnit->iNext = iFreeMemBlock; |
|
306 |
|
307 if(NULL != iFreeMemBlock) |
|
308 { |
|
309 iFreeMemBlock->iPrev = pCurUnit; |
|
310 } |
|
311 |
|
312 iFreeMemBlock = pCurUnit; |
|
313 iFreeMemBlock->iMemPtr->Zero(); |
|
314 iBlockUsage--; |
|
315 |
|
316 C_TRACE( ( _T( "DMemManager::DMemPool::Free<" ) ) ); |
|
317 |
|
318 //If empty & ready to be deleted |
|
319 return ( iCopyPoolInUse && iBlockUsage == 0 ) ? ETrue : EFalse; |
|
320 |
|
321 } |
|
322 |
|
323 EXPORT_C TDes8& MemApi::AllocBlock( const TUint16 aSize ) |
|
324 { |
|
325 C_TRACE( ( _T( "MemApi::AllocBlock 0x%x>" ), aSize ) ); |
|
326 |
|
327 __ASSERT_NO_FAST_MUTEX; |
|
328 |
|
329 ASSERT_RESET_ALWAYS( ( aSize > 0 ), ( EMemBlockSizeZero | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
330 ASSERT_THREAD_CONTEXT_ALWAYS( ( EAllocNotThreadContext | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
331 |
|
332 TPtr8* ptr( NULL ); |
|
333 |
|
334 NKern::FMWait( DMemManager::iThisptr->iFastMutex ); |
|
335 |
|
336 for( TUint8 i( 0 ); i < DMemManager::iThisptr->iMemPond.Count(); ++i ) |
|
337 { |
|
338 if( aSize <= DMemManager::iThisptr->iMemPond[ i ]->iBlockSize ) |
|
339 { |
|
340 ptr = DMemManager::iThisptr->iMemPond[i]->Alloc( aSize ); |
|
341 |
|
342 if( DMemManager::iThisptr->iMemPond[i]->iBlockUsage > DMemManager::iThisptr->iMemPond[i]->iHighWaterMark ) |
|
343 { |
|
344 DMemManager::iThisptr->iPoolCreateQueue.Append( DMemManager::iThisptr->iMemPond[i] ); |
|
345 DMemManager::iThisptr->iPoolAllocateDfc->Enque(); |
|
346 } |
|
347 |
|
348 NKern::FMSignal( DMemManager::iThisptr->iFastMutex ); |
|
349 break; |
|
350 } |
|
351 } |
|
352 |
|
353 ASSERT_RESET_ALWAYS( ptr, ( EMemBlockAllocationFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
354 ASSERT_RESET_ALWAYS( ptr->Size() == 0, ( EMemBlockInvalidReleaseDetected | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
355 |
|
356 C_TRACE( ( _T( "MemApi::AllocBlock 0x%x<" ), ptr ) ); |
|
357 return *ptr; |
|
358 } |
|
359 |
|
360 |
|
/*
 * Returns a block previously obtained via MemApi::AllocBlock to its pool.
 *
 * Locates the owning pool by address range, frees the block, and — when
 * the pool is a drained, superseded copy pool — removes it from the pool
 * array and queues it for deletion in the manager's DFC thread.
 *
 * Must be called from thread context without holding a fast mutex.
 *
 * NOTE(review): a pointer that belongs to no pool is silently ignored
 * (no assert fires) — confirm this is the intended contract.
 */
EXPORT_C void MemApi::DeallocBlock( TDes8& aBlock )
    {
    C_TRACE( ( _T( "MemApi::DeallocBlock aBlock 0x%x>" ), &aBlock ) );

    __ASSERT_NO_FAST_MUTEX;

    ASSERT_THREAD_CONTEXT_ALWAYS( ( EDeallocNotThreadContext | EDMemmanagerTraceId << KClassIdentifierShift ) );

    TBool removePool = EFalse;
    NKern::FMWait( DMemManager::iThisptr->iFastMutex );

    for( TUint8 i( 0 ); i < DMemManager::iThisptr->iMemPond.Count(); ++i )
        {
        //Check if inside pools memory area
        if( ( (DMemManager::iThisptr->iMemPond[i]->iMemoryArea) < aBlock.Ptr() ) &&
            ( aBlock.Ptr() < (DMemManager::iThisptr->iMemPond[i]->iMemoryArea + DMemManager::iThisptr->iMemPond[i]->iPoolSize) ) )
            {
            // Free() returns ETrue only for a drained copy pool that is
            // ready to be deleted.
            removePool = DMemManager::iThisptr->iMemPond[i]->Free( aBlock.Ptr() );

            if( removePool )
                {
                // Hand the pool over to the delete DFC; remove it from
                // iMemPond first so no new allocations can reach it.
                // NOTE(review): Append() may allocate while the fast
                // mutex is held — confirm this is acceptable here.
                DMemManager::iThisptr->iPoolDeleteQueue.Append( DMemManager::iThisptr->iMemPond[i] );
                DMemManager::iThisptr->iMemPond.Remove( i );
                DMemManager::iThisptr->iPoolDeleteDfc->Enque();
                }

            break;
            }
        }

    NKern::FMSignal( DMemManager::iThisptr->iFastMutex );

    }
|
394 |
|
395 DECLARE_STANDARD_EXTENSION() |
|
396 { |
|
397 Kern::Printf( "Memory Manager Extension>" ); |
|
398 DMemManager* extension = new DMemManager(); |
|
399 ASSERT_RESET_ALWAYS( ( extension ), ( EExtensionMemoryAllocationFailed | EDMemmanagerTraceId << KClassIdentifierShift ) ); |
|
400 DMemManager::iThisptr = static_cast< DMemManager* >( extension ); |
|
401 Kern::Printf( "Memory Manager Extension<" ); |
|
402 return KErrNone; |
|
403 } |
|
404 |
|
405 // End of File |
|
406 |