| 0 |      1 | // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
 | 
|  |      2 | // All rights reserved.
 | 
|  |      3 | // This component and the accompanying materials are made available
 | 
|  |      4 | // under the terms of the License "Eclipse Public License v1.0"
 | 
|  |      5 | // which accompanies this distribution, and is available
 | 
|  |      6 | // at the URL "http://www.eclipse.org/legal/epl-v10.html".
 | 
|  |      7 | //
 | 
|  |      8 | // Initial Contributors:
 | 
|  |      9 | // Nokia Corporation - initial contribution.
 | 
|  |     10 | //
 | 
|  |     11 | // Contributors:
 | 
|  |     12 | //
 | 
|  |     13 | // Description:
 | 
|  |     14 | // e32\nkern\nkern.cpp
 | 
|  |     15 | // 
 | 
|  |     16 | //
 | 
|  |     17 | 
 | 
|  |     18 | // NThreadBase member data
 | 
|  |     19 | #define __INCLUDE_NTHREADBASE_DEFINES__
 | 
|  |     20 | 
 | 
|  |     21 | #include "nk_priv.h"
 | 
|  |     22 | 
 | 
|  |     23 | /******************************************************************************
 | 
|  |     24 |  * Fast mutex
 | 
|  |     25 |  ******************************************************************************/
 | 
|  |     26 | 
 | 
|  |     27 | /** Checks if the current thread holds this fast mutex
 | 
|  |     28 | 
 | 
|  |     29 | 	@return TRUE if the current thread holds this fast mutex
 | 
|  |     30 | 	@return FALSE if not
 | 
|  |     31 | */
 | 
|  |     32 | EXPORT_C TBool NFastMutex::HeldByCurrentThread()
 | 
|  |     33 | 	{
 | 
|  |     34 | 	return iHoldingThread == NCurrentThread();
 | 
|  |     35 | 	}
 | 
|  |     36 | 
 | 
|  |     37 | /** Find the fast mutex held by the current thread
 | 
|  |     38 | 
 | 
|  |     39 | 	@return a pointer to the fast mutex held by the current thread
 | 
|  |     40 | 	@return NULL if the current thread does not hold a fast mutex
 | 
|  |     41 | */
 | 
|  |     42 | EXPORT_C NFastMutex* NKern::HeldFastMutex()
 | 
|  |     43 | 	{
 | 
|  |     44 | 	return TheScheduler.iCurrentThread->iHeldFastMutex;
 | 
|  |     45 | 	}
 | 
|  |     46 | 
 | 
|  |     47 | 
 | 
|  |     48 | #ifndef __FAST_MUTEX_MACHINE_CODED__
 | 
|  |     49 | /** Acquires the fast mutex.
 | 
|  |     50 | 
 | 
|  |     51 |     This will block until the mutex is available, and causes
 | 
|  |     52 | 	the thread to enter an implicit critical section until the mutex is released.
 | 
|  |     53 | 
 | 
|  |     54 | 	Generally threads would use NKern::FMWait() which manipulates the kernel lock
 | 
|  |     55 | 	for you.
 | 
|  |     56 | 	
 | 
|  |     57 | 	@pre Kernel must be locked, with lock count 1.
 | 
|  |     58 | 	@pre The calling thread holds no fast mutexes.
 | 
|  |     59 | 	
 | 
|  |     60 | 	@post Kernel is locked, with lock count 1.
 | 
|  |     61 | 	@post The calling thread holds the mutex.
 | 
|  |     62 | 	
 | 
|  |     63 | 	@see NFastMutex::Signal()
 | 
|  |     64 | 	@see NKern::FMWait()
 | 
|  |     65 | */
 | 
EXPORT_C void NFastMutex::Wait()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait %M",this));
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NO_FAST_MUTEX,"NFastMutex::Wait");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	if (iHoldingThread)
		{
		// Contended: flag the contention so the holder triggers a reschedule
		// when it signals, record what we are blocked on, then hand the CPU
		// directly to the holding thread.
		iWaiting=1;
		pC->iWaitFastMutex=this;
		__KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait: YieldTo %T",iHoldingThread));
		TheScheduler.YieldTo(iHoldingThread);	// returns with kernel unlocked, interrupts disabled
		TheScheduler.iKernCSLocked = 1;	// relock kernel
		NKern::EnableAllInterrupts();
		pC->iWaitFastMutex=NULL;	// no longer blocked - we claim the mutex below
		}
	pC->iHeldFastMutex=this;		// automatically puts thread into critical section
#ifdef BTRACE_FAST_MUTEX
	BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexWait,this);
#endif
	iHoldingThread=pC;	// take ownership
	}
 | 
|  |     87 | 
 | 
|  |     88 | 
 | 
|  |     89 | /** Releases a previously acquired fast mutex.
 | 
|  |     90 | 	
 | 
|  |     91 | 	Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
 | 
|  |     92 | 	for you.
 | 
|  |     93 | 	
 | 
|  |     94 | 	@pre The calling thread holds the mutex.
 | 
|  |     95 | 	@pre Kernel must be locked.
 | 
|  |     96 | 	
 | 
|  |     97 | 	@post Kernel is locked.
 | 
|  |     98 | 	
 | 
|  |     99 | 	@see NFastMutex::Wait()
 | 
|  |    100 | 	@see NKern::FMSignal()
 | 
|  |    101 | */
 | 
EXPORT_C void NFastMutex::Signal()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("FMSignal %M",this));
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NFastMutex::Signal");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	__ASSERT_WITH_MESSAGE_DEBUG(pC->iHeldFastMutex==this,"The calling thread holds the mutex","NFastMutex::Signal");
#ifdef BTRACE_FAST_MUTEX
	BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,this);
#endif
	// Drop ownership; clearing iHeldFastMutex also takes the thread out of
	// its implicit critical section.
	iHoldingThread=NULL;
	pC->iHeldFastMutex=NULL;
	// Capture and clear the contention flag before acting on it.
	TBool w=iWaiting;
	iWaiting=0;
	if (w)
		{
		// Another thread was blocked on this mutex - let it run.
		RescheduleNeeded();
		// A suspension/exit may have been deferred while we held the mutex;
		// now that we have left the implicit critical section, act on it.
		if (pC->iCsFunction && !pC->iCsCount)
			pC->DoCsFunction();
		}
	}
 | 
|  |    122 | 
 | 
|  |    123 | 
 | 
|  |    124 | /** Acquires a fast mutex.
 | 
|  |    125 | 
 | 
|  |    126 |     This will block until the mutex is available, and causes
 | 
|  |    127 | 	the thread to enter an implicit critical section until the mutex is released.
 | 
|  |    128 | 
 | 
|  |    129 | 	@param aMutex The fast mutex to acquire.
 | 
|  |    130 | 	
 | 
|  |    131 | 	@post The calling thread holds the mutex.
 | 
|  |    132 | 	
 | 
|  |    133 | 	@see NFastMutex::Wait()
 | 
|  |    134 | 	@see NKern::FMSignal()
 | 
|  |    135 | 
 | 
|  |    136 | 	@pre No fast mutex can be held.
 | 
|  |    137 | 	@pre Call in a thread context.
 | 
|  |    138 | 	@pre Kernel must be unlocked
 | 
|  |    139 | 	@pre interrupts enabled
 | 
|  |    140 | 
 | 
|  |    141 | */
 | 
EXPORT_C void NKern::FMWait(NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::FMWait");
	// Bracket the raw wait with the kernel lock; any reschedule needed to
	// acquire the mutex happens inside Wait()/on Unlock().
	NKern::Lock();
	aMutex->Wait();
	NKern::Unlock();
	}
 | 
|  |    149 | 
 | 
|  |    150 | 
 | 
|  |    151 | /** Releases a previously acquired fast mutex.
 | 
|  |    152 | 	
 | 
|  |    153 | 	@param aMutex The fast mutex to release.
 | 
|  |    154 | 	
 | 
|  |    155 | 	@pre The calling thread holds the mutex.
 | 
|  |    156 | 	
 | 
|  |    157 | 	@see NFastMutex::Signal()
 | 
|  |    158 | 	@see NKern::FMWait()
 | 
|  |    159 | */
 | 
EXPORT_C void NKern::FMSignal(NFastMutex* aMutex)
	{
	// Lock the kernel so the signal is atomic; any reschedule requested by
	// Signal() takes effect when NKern::Unlock() runs.
	NKern::Lock();
	aMutex->Signal();
	NKern::Unlock();
	}
 | 
|  |    166 | 
 | 
|  |    167 | 
 | 
|  |    168 | /** Acquires the System Lock.
 | 
|  |    169 | 
 | 
|  |    170 |     This will block until the mutex is available, and causes
 | 
|  |    171 | 	the thread to enter an implicit critical section until the mutex is released.
 | 
|  |    172 | 
 | 
|  |    173 | 	@post System lock is held.
 | 
|  |    174 | 
 | 
|  |    175 | 	@see NKern::UnlockSystem()
 | 
|  |    176 | 	@see NKern::FMWait()
 | 
|  |    177 | 
 | 
|  |    178 | 	@pre No fast mutex can be held.
 | 
|  |    179 | 	@pre Call in a thread context.
 | 
|  |    180 | 	@pre Kernel must be unlocked
 | 
|  |    181 | 	@pre interrupts enabled
 | 
|  |    182 | */
 | 
EXPORT_C void NKern::LockSystem()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::LockSystem");
	// The System Lock is the scheduler's fast mutex (TheScheduler.iLock).
	NKern::Lock();
	TheScheduler.iLock.Wait();
	NKern::Unlock();
	}
 | 
|  |    190 | 
 | 
|  |    191 | 
 | 
|  |    192 | /** Releases the System Lock.
 | 
|  |    193 | 
 | 
|  |    194 | 	@pre System lock must be held.
 | 
|  |    195 | 
 | 
|  |    196 | 	@see NKern::LockSystem()
 | 
|  |    197 | 	@see NKern::FMSignal()
 | 
|  |    198 | */
 | 
EXPORT_C void NKern::UnlockSystem()
	{
	// Release the scheduler's fast mutex under the kernel lock; a deferred
	// reschedule (if any) occurs on NKern::Unlock().
	NKern::Lock();
	TheScheduler.iLock.Signal();
	NKern::Unlock();
	}
 | 
|  |    205 | 
 | 
|  |    206 | 
 | 
|  |    207 | /** Temporarily releases a fast mutex if there is contention.
 | 
|  |    208 | 
 | 
|  |    209 |     If there is another thread attempting to acquire the mutex, the calling
 | 
|  |    210 | 	thread releases the mutex and then acquires it again.
 | 
|  |    211 | 	
 | 
|  |    212 | 	This is more efficient than the equivalent code:
 | 
|  |    213 | 	
 | 
|  |    214 | 	@code
 | 
|  |    215 | 	NKern::FMSignal();
 | 
|  |    216 | 	NKern::FMWait();
 | 
|  |    217 | 	@endcode
 | 
|  |    218 | 
 | 
|  |    219 | 	@return	TRUE if the mutex was relinquished, FALSE if not.
 | 
|  |    220 | 
 | 
|  |    221 | 	@pre	The mutex must be held.
 | 
|  |    222 | 
 | 
|  |    223 | 	@post	The mutex is held.
 | 
|  |    224 | */
 | 
|  |    225 | EXPORT_C TBool NKern::FMFlash(NFastMutex* aM)
 | 
|  |    226 | 	{
 | 
|  |    227 | 	__ASSERT_WITH_MESSAGE_DEBUG(aM->HeldByCurrentThread(),"The calling thread holds the mutex","NKern::FMFlash");
 | 
|  |    228 | 	TBool w = aM->iWaiting;
 | 
|  |    229 | 	if (w)
 | 
|  |    230 | 		{
 | 
|  |    231 | 		NKern::Lock();
 | 
|  |    232 | 		aM->Signal();
 | 
|  |    233 | 		NKern::PreemptionPoint();
 | 
|  |    234 | 		aM->Wait();
 | 
|  |    235 | 		NKern::Unlock();
 | 
|  |    236 | 		}
 | 
|  |    237 | #ifdef BTRACE_FAST_MUTEX
 | 
|  |    238 | 	else
 | 
|  |    239 | 		{
 | 
|  |    240 | 		BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexFlash,aM);
 | 
|  |    241 | 		}
 | 
|  |    242 | #endif
 | 
|  |    243 | 	return w;
 | 
|  |    244 | 	}
 | 
|  |    245 | 
 | 
|  |    246 | 
 | 
|  |    247 | /** Temporarily releases the System Lock if there is contention.
 | 
|  |    248 | 
 | 
|  |    249 |     If there
 | 
|  |    250 | 	is another thread attempting to acquire the System lock, the calling
 | 
|  |    251 | 	thread releases the mutex and then acquires it again.
 | 
|  |    252 | 	
 | 
|  |    253 | 	This is more efficient than the equivalent code:
 | 
|  |    254 | 	
 | 
|  |    255 | 	@code
 | 
|  |    256 | 	NKern::UnlockSystem();
 | 
|  |    257 | 	NKern::LockSystem();
 | 
|  |    258 | 	@endcode
 | 
|  |    259 | 
 | 
|  |    260 | 	Note that this can only allow higher priority threads to use the System
 | 
|  |    261 | 	lock as lower priority cannot cause contention on a fast mutex.
 | 
|  |    262 | 
 | 
|  |    263 | 	@return	TRUE if the system lock was relinquished, FALSE if not.
 | 
|  |    264 | 
 | 
|  |    265 | 	@pre	System lock must be held.
 | 
|  |    266 | 
 | 
|  |    267 | 	@post	System lock is held.
 | 
|  |    268 | 
 | 
|  |    269 | 	@see NKern::LockSystem()
 | 
|  |    270 | 	@see NKern::UnlockSystem()
 | 
|  |    271 | */
 | 
EXPORT_C TBool NKern::FlashSystem()
	{
	// The System Lock is just the scheduler's fast mutex, so delegate.
	return NKern::FMFlash(&TheScheduler.iLock);
	}
 | 
|  |    276 | #endif
 | 
|  |    277 | 
 | 
|  |    278 | 
 | 
|  |    279 | /******************************************************************************
 | 
|  |    280 |  * Fast semaphore
 | 
|  |    281 |  ******************************************************************************/
 | 
|  |    282 | 
 | 
|  |    283 | /** Sets the owner of a fast semaphore.
 | 
|  |    284 | 
 | 
|  |    285 | 	@param aThread The thread to own this semaphore. If aThread==0, then the
 | 
|  |    286 | 					owner is set to the current thread.
 | 
|  |    287 | 
 | 
|  |    288 | 	@pre Kernel must be locked.
 | 
|  |    289 | 	@pre If changing ownership from one thread to another, there must be no
 | 
|  |    290 | 		 pending signals or waits.
 | 
|  |    291 | 	@pre Call either in a thread or an IDFC context.
 | 
|  |    292 | 	
 | 
|  |    293 | 	@post Kernel is locked.
 | 
|  |    294 | */
 | 
|  |    295 | EXPORT_C void NFastSemaphore::SetOwner(NThreadBase* aThread)
 | 
|  |    296 | 	{
 | 
|  |    297 | 	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SetOwner");		
 | 
|  |    298 | 	if(!aThread)
 | 
|  |    299 | 		aThread = TheScheduler.iCurrentThread;
 | 
|  |    300 | 	if(iOwningThread && iOwningThread!=aThread)
 | 
|  |    301 | 		{
 | 
|  |    302 | 		__NK_ASSERT_ALWAYS(!iCount);	// Can't change owner if iCount!=0
 | 
|  |    303 | 		}
 | 
|  |    304 | 	iOwningThread = aThread;
 | 
|  |    305 | 	}
 | 
|  |    306 | 
 | 
|  |    307 | 
 | 
|  |    308 | #ifndef __FAST_SEM_MACHINE_CODED__
 | 
|  |    309 | /** Waits on a fast semaphore.
 | 
|  |    310 | 
 | 
|  |    311 |     Decrements the signal count for the semaphore and
 | 
|  |    312 | 	removes the calling thread from the ready-list if the semaphore becomes
 | 
|  |    313 | 	unsignalled. Only the thread that owns a fast semaphore can wait on it.
 | 
|  |    314 | 	
 | 
|  |    315 | 	Note that this function does not block, it merely updates the NThread state,
 | 
|  |    316 | 	rescheduling will only occur when the kernel is unlocked. Generally threads
 | 
|  |    317 | 	would use NKern::FSWait() which manipulates the kernel lock for you.
 | 
|  |    318 | 
 | 
|  |    319 | 	@pre The calling thread must own the semaphore.
 | 
|  |    320 | 	@pre No fast mutex can be held.
 | 
|  |    321 | 	@pre Kernel must be locked.
 | 
|  |    322 | 	
 | 
|  |    323 | 	@post Kernel is locked.
 | 
|  |    324 | 	
 | 
|  |    325 | 	@see NFastSemaphore::Signal()
 | 
|  |    326 | 	@see NKern::FSWait()
 | 
|  |    327 | 	@see NKern::Unlock()
 | 
|  |    328 |  */
 | 
|  |    329 | EXPORT_C void NFastSemaphore::Wait()
 | 
|  |    330 | 	{
 | 
|  |    331 | 	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NO_FAST_MUTEX,"NFastSemaphore::Wait");		
 | 
|  |    332 | 	NThreadBase* pC=TheScheduler.iCurrentThread;
 | 
|  |    333 | 	__ASSERT_WITH_MESSAGE_ALWAYS(pC==iOwningThread,"The calling thread must own the semaphore","NFastSemaphore::Wait");
 | 
|  |    334 | 	if (--iCount<0)
 | 
|  |    335 | 		{
 | 
|  |    336 | 		pC->iNState=NThread::EWaitFastSemaphore;
 | 
|  |    337 | 		pC->iWaitObj=this;
 | 
|  |    338 | 		TheScheduler.Remove(pC);
 | 
|  |    339 | 		RescheduleNeeded();
 | 
|  |    340 | 		}
 | 
|  |    341 | 	}
 | 
|  |    342 | 
 | 
|  |    343 | 
 | 
|  |    344 | /** Signals a fast semaphore.
 | 
|  |    345 | 
 | 
|  |    346 |     Increments the signal count of a fast semaphore by
 | 
|  |    347 | 	one and releases any waiting thread if the semaphore becomes signalled.
 | 
|  |    348 | 	
 | 
|  |    349 | 	Note that a reschedule will not occur before this function returns, this will
 | 
|  |    350 | 	only take place when the kernel is unlocked. Generally threads
 | 
|  |    351 | 	would use NKern::FSSignal() which manipulates the kernel lock for you.
 | 
|  |    352 | 	
 | 
|  |    353 | 	@pre Kernel must be locked.
 | 
|  |    354 | 	@pre Call either in a thread or an IDFC context.
 | 
|  |    355 | 	
 | 
|  |    356 | 	@post Kernel is locked.
 | 
|  |    357 | 	
 | 
|  |    358 | 	@see NFastSemaphore::Wait()
 | 
|  |    359 | 	@see NKern::FSSignal()
 | 
|  |    360 | 	@see NKern::Unlock()
 | 
|  |    361 |  */
 | 
|  |    362 | EXPORT_C void NFastSemaphore::Signal()
 | 
|  |    363 | 	{
 | 
|  |    364 | 	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Signal");			
 | 
|  |    365 | 	if (++iCount<=0)
 | 
|  |    366 | 		{
 | 
|  |    367 | 		iOwningThread->iWaitObj=NULL;
 | 
|  |    368 | 		iOwningThread->CheckSuspendThenReady();
 | 
|  |    369 | 		}
 | 
|  |    370 | 	}
 | 
|  |    371 | 
 | 
|  |    372 | 
 | 
|  |    373 | /** Signals a fast semaphore multiple times.
 | 
|  |    374 | 
 | 
|  |    375 | 	@pre Kernel must be locked.
 | 
|  |    376 | 	@pre Call either in a thread or an IDFC context.
 | 
|  |    377 | 	
 | 
|  |    378 | 	@post Kernel is locked.
 | 
|  |    379 | 
 | 
|  |    380 | 	@internalComponent	
 | 
|  |    381 |  */
 | 
|  |    382 | EXPORT_C void NFastSemaphore::SignalN(TInt aCount)
 | 
|  |    383 | 	{
 | 
|  |    384 | 	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SignalN");			
 | 
|  |    385 | 	__NK_ASSERT_DEBUG(aCount>=0);
 | 
|  |    386 | 	if (aCount>0 && iCount<0)
 | 
|  |    387 | 		{
 | 
|  |    388 | 		iOwningThread->iWaitObj=NULL;
 | 
|  |    389 | 		iOwningThread->CheckSuspendThenReady();
 | 
|  |    390 | 		}
 | 
|  |    391 | 	iCount+=aCount;
 | 
|  |    392 | 	}
 | 
|  |    393 | 
 | 
|  |    394 | 
 | 
|  |    395 | /** Resets a fast semaphore.
 | 
|  |    396 | 
 | 
|  |    397 | 	@pre Kernel must be locked.
 | 
|  |    398 | 	@pre Call either in a thread or an IDFC context.
 | 
|  |    399 | 	
 | 
|  |    400 | 	@post Kernel is locked.
 | 
|  |    401 | 
 | 
|  |    402 | 	@internalComponent	
 | 
|  |    403 |  */
 | 
|  |    404 | EXPORT_C void NFastSemaphore::Reset()
 | 
|  |    405 | 	{
 | 
|  |    406 | 	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Reset");			
 | 
|  |    407 | 	if (iCount<0)
 | 
|  |    408 | 		{
 | 
|  |    409 | 		iOwningThread->iWaitObj=NULL;
 | 
|  |    410 | 		iOwningThread->CheckSuspendThenReady();
 | 
|  |    411 | 		}
 | 
|  |    412 | 	iCount=0;
 | 
|  |    413 | 	}
 | 
|  |    414 | 
 | 
|  |    415 | 
 | 
|  |    416 | /** Cancels a wait on a fast semaphore.
 | 
|  |    417 | 
 | 
|  |    418 | 	@pre Kernel must be locked.
 | 
|  |    419 | 	@pre Call either in a thread or an IDFC context.
 | 
|  |    420 | 	
 | 
|  |    421 | 	@post Kernel is locked.
 | 
|  |    422 | 
 | 
|  |    423 | 	@internalComponent	
 | 
|  |    424 |  */
 | 
void NFastSemaphore::WaitCancel()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::WaitCancel");
	// Abandon the pending wait: zero the count, detach the owner from this
	// semaphore and make it schedulable again (unless explicitly suspended).
	iCount=0;
	iOwningThread->iWaitObj=NULL;
	iOwningThread->CheckSuspendThenReady();
	}
 | 
|  |    432 | 
 | 
|  |    433 | 
 | 
|  |    434 | /** Waits for a signal on the current thread's I/O semaphore.
 | 
|  |    435 | 
 | 
|  |    436 | 	@pre No fast mutex can be held.
 | 
|  |    437 | 	@pre Call in a thread context.
 | 
|  |    438 | 	@pre Kernel must be unlocked
 | 
|  |    439 | 	@pre interrupts enabled
 | 
|  |    440 |  */
 | 
|  |    441 | EXPORT_C void NKern::WaitForAnyRequest()
 | 
|  |    442 | 	{
 | 
|  |    443 | 	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::WaitForAnyRequest");
 | 
|  |    444 | 	__KTRACE_OPT(KNKERN,DEBUGPRINT("WfAR"));
 | 
|  |    445 | 	NThreadBase* pC=TheScheduler.iCurrentThread;
 | 
|  |    446 | 	NKern::Lock();
 | 
|  |    447 | 	pC->iRequestSemaphore.Wait();
 | 
|  |    448 | 	NKern::Unlock();
 | 
|  |    449 | 	}
 | 
|  |    450 | #endif
 | 
|  |    451 | 
 | 
|  |    452 | 
 | 
|  |    453 | /** Sets the owner of a fast semaphore.
 | 
|  |    454 | 
 | 
|  |    455 | 	@param aSem The semaphore to change ownership of.
 | 
|  |    456 | 	@param aThread The thread to own this semaphore. If aThread==0, then the
 | 
|  |    457 | 					owner is set to the current thread.
 | 
|  |    458 | 
 | 
|  |    459 | 	@pre If changing ownership from one thread to another, there must be no
 | 
|  |    460 | 		 pending signals or waits.
 | 
|  |    461 | */
 | 
EXPORT_C void NKern::FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSetOwner %m %T",aSem,aThread));
	// SetOwner() requires the kernel lock; aThread==0 selects the current thread.
	NKern::Lock();
	aSem->SetOwner(aThread);
	NKern::Unlock();
	}
 | 
|  |    469 | 
 | 
|  |    470 | /** Waits on a fast semaphore.
 | 
|  |    471 | 
 | 
|  |    472 |     Decrements the signal count for the semaphore
 | 
|  |    473 | 	and waits for a signal if the sempahore becomes unsignalled. Only the
 | 
|  |    474 | 	thread that owns a fast	semaphore can wait on it.
 | 
|  |    475 | 
 | 
|  |    476 | 	@param aSem The semaphore to wait on.
 | 
|  |    477 | 	
 | 
|  |    478 | 	@pre The calling thread must own the semaphore.
 | 
|  |    479 | 	@pre No fast mutex can be held.
 | 
|  |    480 | 	
 | 
|  |    481 | 	@see NFastSemaphore::Wait()
 | 
|  |    482 | */
 | 
EXPORT_C void NKern::FSWait(NFastSemaphore* aSem)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSWait %m",aSem));
	// NFastSemaphore::Wait() only marks the thread as waiting; the actual
	// block happens via the deferred reschedule in NKern::Unlock().
	NKern::Lock();
	aSem->Wait();
	NKern::Unlock();
	}
 | 
|  |    490 | 
 | 
|  |    491 | 
 | 
|  |    492 | /** Signals a fast semaphore.
 | 
|  |    493 | 
 | 
|  |    494 |     Increments the signal count of a fast semaphore
 | 
|  |    495 | 	by one and releases any	waiting thread if the semphore becomes signalled.
 | 
|  |    496 | 	
 | 
|  |    497 | 	@param aSem The semaphore to signal.
 | 
|  |    498 | 
 | 
|  |    499 | 	@see NKern::FSWait()
 | 
|  |    500 | 
 | 
|  |    501 | 	@pre Interrupts must be enabled.
 | 
|  |    502 | 	@pre Do not call from an ISR
 | 
|  |    503 |  */
 | 
EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignal(NFastSemaphore*)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m",aSem));
	// Signal under the kernel lock; any wake-up reschedule runs on Unlock().
	NKern::Lock();
	aSem->Signal();
	NKern::Unlock();
	}
 | 
|  |    512 | 
 | 
|  |    513 | 
 | 
|  |    514 | /** Atomically signals a fast semaphore and releases a fast mutex.
 | 
|  |    515 | 
 | 
|  |    516 | 	Rescheduling only occurs after both synchronisation operations are complete.
 | 
|  |    517 | 	
 | 
|  |    518 | 	@param aSem The semaphore to signal.
 | 
|  |    519 | 	@param aMutex The mutex to release. If NULL, the System Lock is released
 | 
|  |    520 | 
 | 
|  |    521 | 	@pre The calling thread must hold the mutex.
 | 
|  |    522 | 	
 | 
|  |    523 | 	@see NKern::FMSignal()
 | 
|  |    524 |  */
 | 
|  |    525 | EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex)
 | 
|  |    526 | 	{
 | 
|  |    527 | 	if (!aMutex)
 | 
|  |    528 | 		aMutex=&TheScheduler.iLock;
 | 
|  |    529 | 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m +FM %M",aSem,aMutex));
 | 
|  |    530 | 	NKern::Lock();
 | 
|  |    531 | 	aSem->Signal();
 | 
|  |    532 | 	aMutex->Signal();
 | 
|  |    533 | 	NKern::Unlock();
 | 
|  |    534 | 	}
 | 
|  |    535 | 
 | 
|  |    536 | 
 | 
|  |    537 | /** Signals a fast semaphore multiple times.
 | 
|  |    538 | 
 | 
|  |    539 |     Increments the signal count of a
 | 
|  |    540 | 	fast semaphore by aCount and releases any waiting thread if the semphore
 | 
|  |    541 | 	becomes signalled.
 | 
|  |    542 | 	
 | 
|  |    543 | 	@param aSem The semaphore to signal.
 | 
|  |    544 | 	@param aCount The number of times to signal the semaphore.
 | 
|  |    545 | 
 | 
|  |    546 | 	@see NKern::FSWait()
 | 
|  |    547 | 
 | 
|  |    548 | 	@pre Interrupts must be enabled.
 | 
|  |    549 | 	@pre Do not call from an ISR
 | 
|  |    550 |  */
 | 
EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignalN(NFastSemaphore*, TInt)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d",aSem,aCount));
	// Deliver all aCount signals under one kernel-lock hold.
	NKern::Lock();
	aSem->SignalN(aCount);
	NKern::Unlock();
	}
 | 
|  |    559 | 
 | 
|  |    560 | 
 | 
|  |    561 | /** Atomically signals a fast semaphore multiple times and releases a fast mutex.
 | 
|  |    562 | 
 | 
|  |    563 | 	Rescheduling only occurs after both synchronisation operations are complete.
 | 
|  |    564 | 	
 | 
|  |    565 | 	@param aSem The semaphore to signal.
 | 
|  |    566 | 	@param aCount The number of times to signal the semaphore.
 | 
|  |    567 | 	@param aMutex The mutex to release. If NULL, the System Lock is released.
 | 
|  |    568 | 
 | 
|  |    569 | 	@pre The calling thread must hold the mutex.
 | 
|  |    570 | 	
 | 
|  |    571 | 	@see NKern::FMSignal()
 | 
|  |    572 |  */
 | 
|  |    573 | EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex)
 | 
|  |    574 | 	{
 | 
|  |    575 | 	if (!aMutex)
 | 
|  |    576 | 		aMutex=&TheScheduler.iLock;
 | 
|  |    577 | 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d + FM %M",aSem,aCount,aMutex));
 | 
|  |    578 | 	NKern::Lock();
 | 
|  |    579 | 	aSem->SignalN(aCount);
 | 
|  |    580 | 	aMutex->Signal();
 | 
|  |    581 | 	NKern::Unlock();
 | 
|  |    582 | 	}
 | 
|  |    583 | 
 | 
|  |    584 | 
 | 
|  |    585 | /******************************************************************************
 | 
|  |    586 |  * Thread
 | 
|  |    587 |  ******************************************************************************/
 | 
|  |    588 | 
 | 
|  |    589 | #ifndef __SCHEDULER_MACHINE_CODED__
 | 
|  |    590 | /** Makes a nanothread ready provided that it is not explicitly suspended.
 | 
|  |    591 | 	
 | 
|  |    592 | 	For use by RTOS personality layers.
 | 
|  |    593 | 
 | 
|  |    594 | 	@pre	Kernel must be locked.
 | 
|  |    595 | 	@pre	Call either in a thread or an IDFC context.
 | 
|  |    596 | 
 | 
|  |    597 | 	@post	Kernel is locked.
 | 
|  |    598 |  */
 | 
|  |    599 | EXPORT_C void NThreadBase::CheckSuspendThenReady()
 | 
|  |    600 | 	{
 | 
|  |    601 | 	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::CheckSuspendThenReady");	
 | 
|  |    602 | 	if (iSuspendCount==0)
 | 
|  |    603 | 		Ready();
 | 
|  |    604 | 	else
 | 
|  |    605 | 		iNState=ESuspended;
 | 
|  |    606 | 	}
 | 
|  |    607 | 
 | 
|  |    608 | /** Makes a nanothread ready.
 | 
|  |    609 | 	
 | 
|  |    610 | 	For use by RTOS personality layers.
 | 
|  |    611 | 
 | 
|  |    612 | 	@pre	Kernel must be locked.
 | 
|  |    613 | 	@pre	Call either in a thread or an IDFC context.
 | 
|  |    614 | 	@pre	The thread being made ready must not be explicitly suspended
 | 
|  |    615 | 	
 | 
|  |    616 | 	@post	Kernel is locked.
 | 
|  |    617 |  */
 | 
EXPORT_C void NThreadBase::Ready()
	{
#ifdef _DEBUG
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Ready");
	__ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::Ready");

	// Debug-only "crazy scheduler" mode: artificially park non-idle threads
	// on a delayed queue instead of making them ready immediately, to shake
	// out timing-dependent bugs. Requires the ms tick to be running.
	if (DEBUGNUM(KCRAZYSCHEDDELAY) && iPriority && TheTimerQ.iMsCount)
		{
		// Delay this thread, unless it's already on the delayed queue
		if ((i_ThrdAttr & KThreadAttDelayed) == 0)
			{
			i_ThrdAttr |= KThreadAttDelayed;
			TheScheduler.iDelayedQ.Add(this);
			}
		}
	else
		{
		// Delayed scheduler off
		// or idle thread, or the tick hasn't started yet
		DoReady();
		}
#else
	// Release builds always make the thread ready directly.
	DoReady();
#endif
	}
 | 
|  |    643 | 
 | 
void NThreadBase::DoReady()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::DoReady");
	__ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::DoReady");

	TScheduler& s=TheScheduler;
	TInt p=iPriority;
//	__KTRACE_OPT(KSCHED,Kern::Printf("Ready(%O), priority %d status %d",this,p,iStatus));
	if (iNState==EDead)
		return;		// dead threads can never become ready again
	s.Add(this);	// add to the ready queue for this thread's priority
	iNState=EReady;
	if (!(s>p))	// s>p <=> highest ready priority > our priority so no preemption
		{
		// if no other thread at this priority or first thread at this priority has used its timeslice, reschedule
		// note iNext points to first thread at this priority since we got added to the end
		if (iNext==this || ((NThreadBase*)iNext)->iTime==0)
			RescheduleNeeded();
		}
	}
 | 
|  |    664 | #endif
 | 
|  |    665 | 
 | 
|  |    666 | void NThreadBase::DoCsFunction()
 | 
|  |    667 | 	{
 | 
|  |    668 | 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::DoCsFunction %T %d",this,iCsFunction));
 | 
|  |    669 | 	TInt f=iCsFunction;
 | 
|  |    670 | 	iCsFunction=0;
 | 
|  |    671 | 	if (f>0)
 | 
|  |    672 | 		{
 | 
|  |    673 | 		// suspend this thread f times
 | 
|  |    674 | 		Suspend(f);
 | 
|  |    675 | 		return;
 | 
|  |    676 | 		}
 | 
|  |    677 | 	if (f==ECSExitPending)
 | 
|  |    678 | 		{
 | 
|  |    679 | 		// We need to exit now
 | 
|  |    680 | 		Exit();	// this won't return
 | 
|  |    681 | 		}
 | 
|  |    682 | 	UnknownState(ELeaveCS,f);	// call into RTOS personality
 | 
|  |    683 | 	}
 | 
|  |    684 | 
 | 
|  |    685 | 
 | 
|  |    686 | /** Suspends a nanothread the specified number of times.
 | 
|  |    687 | 	
 | 
|  |    688 | 	For use by RTOS personality layers.
 | 
|  |    689 | 	Do not use this function directly on a Symbian OS thread.
 | 
|  |    690 | 	Since the kernel is locked on entry, any reschedule will be deferred until
 | 
|  |    691 | 	it is unlocked.
 | 
|  |    692 | 	The suspension will be deferred if the target thread is currently in a
 | 
|  |    693 | 	critical section; in this case the suspension will take effect when it exits
 | 
|  |    694 | 	the critical section.
 | 
|  |    695 | 	The thread's unknown state handler will be invoked with function ESuspend and
 | 
|  |    696 | 	parameter aCount if the current NState is not recognised and it is not in a
 | 
|  |    697 | 	critical section.
 | 
|  |    698 | 
 | 
|  |    699 | 	@param	aCount = the number of times to suspend.
 | 
|  |    700 | 	@return	TRUE, if the suspension has taken immediate effect;
 | 
|  |    701 | 			FALSE, if the thread is in a critical section or is already suspended.
 | 
|  |    702 | 	
 | 
|  |    703 | 	@pre	Kernel must be locked.
 | 
|  |    704 | 	@pre	Call in a thread context.
 | 
|  |    705 | 	
 | 
|  |    706 | 	@post	Kernel is locked.
 | 
|  |    707 |  */
 | 
EXPORT_C TBool NThreadBase::Suspend(TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Suspend");
	// If thread is executing a critical section, we must defer the suspend
	if (iNState==EDead)
		return FALSE;		// already dead so suspension is a no-op
	if (iCsCount || iHeldFastMutex)
		{
		// In a critical section (explicit, or implicit via a held fast mutex):
		// record the suspensions in iCsFunction to be applied on exit.
		__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (CSF %d) %d",this,iCsFunction,aCount));
		if (iCsFunction>=0)			// -ve means thread is about to exit
			{
			iCsFunction+=aCount;	// so thread will suspend itself when it leaves the critical section
			if (iHeldFastMutex && iCsCount==0)
				iHeldFastMutex->iWaiting=1;	// force the mutex release path to run DoCsFunction()
			}
		return FALSE;
		}

	// thread not in critical section, so suspend it
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (NState %d) %d",this,iNState,aCount));
	switch (iNState)
		{
		case EReady:
			TheScheduler.Remove(this);
			RescheduleNeeded();
			iNState=ESuspended;
			// deliberate fall-through: nothing more to do for EReady
		case EWaitFastSemaphore:
		case EWaitDfc:
		case ESleep:
		case EBlocked:
		case ESuspended:
			// Already off the ready queue - only the count needs updating.
			break;
		default:
			UnknownState(ESuspend,aCount);
			break;
		}
	// iSuspendCount counts downwards from 0; it was 0 iff the thread was not
	// previously suspended.
	TInt old_suspend=iSuspendCount;
	iSuspendCount-=aCount;
	return (old_suspend==0);	// return TRUE if thread has changed from not-suspended to suspended.
	}
 | 
|  |    748 | 
 | 
|  |    749 | 
 | 
|  |    750 | /** Resumes a nanothread, cancelling one suspension.
 | 
|  |    751 | 	
 | 
|  |    752 | 	For use by RTOS personality layers.
 | 
|  |    753 | 	Do not use this function directly on a Symbian OS thread.
 | 
|  |    754 | 	Since the kernel is locked on entry, any reschedule will be deferred until
 | 
|  |    755 | 	it is unlocked.
 | 
|  |    756 | 	If the target thread is currently in a critical section this will simply
 | 
|  |    757 | 	cancel one deferred suspension.
 | 
|  |    758 | 	The thread's unknown state handler will be invoked with function EResume if
 | 
	the current NState is not recognised and it is not in a critical section.
 | 
|  |    760 | 
 | 
|  |    761 | 	@return	TRUE, if the resumption has taken immediate effect;
 | 
|  |    762 | 			FALSE, if the thread is in a critical section or is still suspended.
 | 
|  |    763 | 	
 | 
|  |    764 | 	@pre	Kernel must be locked.
 | 
|  |    765 | 	@pre	Call either in a thread or an IDFC context.
 | 
|  |    766 | 	
 | 
|  |    767 | 	@post	Kernel must be locked.
 | 
|  |    768 |  */
 | 
EXPORT_C TBool NThreadBase::Resume()
	{
	// Cancel one suspension of this nanothread.
	// NOTE(review): the header comment above says this may be called from an
	// IDFC, but the precondition mask below includes MASK_NOT_IDFC - confirm
	// which is authoritative before calling from IDFC context.
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Resume");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Resume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction));
	if (iNState==EDead)
		return FALSE;		// resuming a dead thread is a no-op

	// If thread is in critical section, just cancel deferred suspends
	if (iCsCount || iHeldFastMutex)
		{
		// iCsFunction>0 counts deferred suspensions; negative values mark a
		// pending exit and must not be decremented here.
		if (iCsFunction>0)
			--iCsFunction;	// one less deferred suspension
		return FALSE;
		}
	// iSuspendCount is negative while suspended; only act when the last
	// outstanding suspension is being cancelled (count returns to zero).
	if (iSuspendCount<0 && ++iSuspendCount==0)
		{
		switch (iNState)
			{
			case ESuspended:
				Ready();
				// fall through - nothing more to do once back on the ready list
			case EReady:
			case EWaitFastSemaphore:
			case EWaitDfc:
			case ESleep:
			case EBlocked:
				break;
			default:
				// State belongs to an RTOS personality layer - delegate.
				UnknownState(EResume,0);
				break;
			}
		return TRUE;	// thread has changed from suspended to not-suspended
		}
	return FALSE;	// still suspended or not initially suspended so no higher level action required
	}
 | 
|  |    803 | 
 | 
|  |    804 | 
 | 
|  |    805 | /** Resumes a nanothread, cancelling all outstanding suspensions.
 | 
|  |    806 | 	
 | 
|  |    807 | 	For use by RTOS personality layers.
 | 
|  |    808 | 	Do not use this function directly on a Symbian OS thread.
 | 
|  |    809 | 	Since the kernel is locked on entry, any reschedule will be deferred until
 | 
|  |    810 | 	it is unlocked.
 | 
|  |    811 | 	If the target thread is currently in a critical section this will simply
 | 
|  |    812 | 	cancel all deferred suspensions.
 | 
|  |    813 | 	The thread's unknown state handler will be invoked with function EForceResume
 | 
	if the current NState is not recognised and it is not in a critical section.
 | 
|  |    815 | 
 | 
|  |    816 | 	@return	TRUE, if the resumption has taken immediate effect;
 | 
|  |    817 | 			FALSE, if the thread is in a critical section.
 | 
|  |    818 | 
 | 
|  |    819 | 	@pre	Kernel must be locked.
 | 
|  |    820 | 	@pre	Call either in a thread or an IDFC context.
 | 
|  |    821 | 
 | 
|  |    822 | 	@post	Kernel is locked.
 | 
|  |    823 |  */
 | 
EXPORT_C TBool NThreadBase::ForceResume()
	{
	// Cancel all outstanding suspensions of this nanothread.
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::ForceResume");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::ForceResume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction));
	if (iNState==EDead)
		return FALSE;		// resuming a dead thread is a no-op

	// If thread is in critical section, just cancel deferred suspends
	if (iCsCount || iHeldFastMutex)
		{
		// iCsFunction>0 counts deferred suspensions; negative values mark a
		// pending exit and must be left alone.
		if (iCsFunction>0)
			iCsFunction=0;	// cancel all deferred suspensions
		return FALSE;
		}
	// iSuspendCount is negative while suspended.
	if (iSuspendCount<0)
		{
		iSuspendCount=0;
		switch (iNState)
			{
			case ESuspended:
				Ready();
				// fall through - thread is back on the ready list
			case EReady:
			case EWaitFastSemaphore:
			case EWaitDfc:
			case ESleep:
			case EBlocked:
			case EDead:
				break;
			default:
				// State belongs to an RTOS personality layer - delegate.
				UnknownState(EForceResume,0);
				break;
			}
		}
	// TRUE even if the thread was not suspended - only the critical-section
	// branch above reports FALSE (see the header comment's contract).
	return TRUE;
	}
 | 
|  |    859 | 
 | 
|  |    860 | 
 | 
|  |    861 | /** Releases a waiting nanokernel thread.
 | 
|  |    862 | 
 | 
|  |    863 | 	For use by RTOS personality layers.
 | 
|  |    864 | 	Do not use this function directly on a Symbian OS thread.
 | 
|  |    865 | 	This function should make the thread ready (provided it is not explicitly
 | 
|  |    866 | 	suspended) and cancel any wait timeout. It should also remove it from any
 | 
|  |    867 | 	wait queues.
 | 
|  |    868 | 	If aReturnCode is nonnegative it indicates normal completion of the wait.
 | 
|  |    869 | 	If aReturnCode is negative it indicates early/abnormal completion of the
 | 
|  |    870 | 	wait and so any wait object should be reverted as if the wait had never
 | 
|  |    871 | 	occurred (eg semaphore count should be incremented as this thread has not
 | 
|  |    872 | 	actually acquired the semaphore).
 | 
|  |    873 | 	The thread's unknown state handler will be invoked with function ERelease
 | 
|  |    874 | 	and parameter aReturnCode if the current NState is not recognised.
 | 
|  |    875 | 	
 | 
|  |    876 | 	@param aReturnCode	The reason code for release.
 | 
|  |    877 | 
 | 
|  |    878 | 	@pre	Kernel must be locked.
 | 
|  |    879 | 	@pre	Call either in a thread or an IDFC context.
 | 
|  |    880 | 	
 | 
|  |    881 | 	@post	Kernel is locked.
 | 
|  |    882 |  */
 | 
EXPORT_C void NThreadBase::Release(TInt aReturnCode)
	{
	// Wake this thread from whatever it is waiting on, cancelling any wait
	// timeout. aReturnCode<0 indicates abnormal completion, in which case the
	// wait object is reverted as if the wait had never happened.
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Release");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Release %T, state %d retcode %d",this,iNState,aReturnCode));
	switch(iNState)
		{
		case EDead:
			return;		// dead thread - don't touch timer or wait fields
		case EReady:
		case ESuspended:
			// don't release explicit suspensions
			break;
		case EWaitFastSemaphore:
			// Abnormal completion: undo the semaphore wait (the thread never
			// actually acquired the semaphore).
			if (aReturnCode<0 && iWaitObj)
				((NFastSemaphore*)iWaitObj)->WaitCancel();
			break;
		case ESleep:
		case EBlocked:
		case EWaitDfc:
			// Make the thread ready again unless it is explicitly suspended.
			CheckSuspendThenReady();
			break;
		default:
			// State belongs to an RTOS personality layer - delegate.
			UnknownState(ERelease,aReturnCode);
			break;
		}
	// A wait timeout may still be outstanding - cancel it.
	if (iTimer.iUserFlags)
		{
		if (iTimer.iState == NTimer::EIdle)
			{
			// Potential race condition - timer must have completed but expiry
			// handler has not yet run. Signal to the handler that it should do
			// nothing by flipping the bottom bit of iTimer.iPtr
			// This condition cannot possibly recur until the expiry handler has
			// run since all expiry handlers run in DfcThread1.
			TLinAddr& x = *(TLinAddr*)&iTimer.iPtr;
			x ^= 1;
			}
		iTimer.Cancel();
		iTimer.iUserFlags = FALSE;
		}
	iWaitObj=NULL;
	iReturnValue=aReturnCode;
	}
 | 
|  |    926 | 
 | 
|  |    927 | 
 | 
|  |    928 | /** Signals a nanokernel thread's request semaphore.
 | 
|  |    929 | 
 | 
|  |    930 | 	This can also be used on Symbian OS threads.
 | 
|  |    931 | 	
 | 
|  |    932 | 	@pre	Kernel must be locked.
 | 
|  |    933 | 	@pre	Call either in a thread or an IDFC context.
 | 
|  |    934 | 	
 | 
|  |    935 | 	@post	Kernel is locked.
 | 
|  |    936 |  */
 | 
EXPORT_C void NThreadBase::RequestSignal()
	{
	// Signal this thread's request semaphore. Kernel must be locked; not
	// callable from an ISR (enforced by the precondition mask).
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::RequestSignal");
	iRequestSemaphore.Signal();
	}
 | 
|  |    942 | 
 | 
void NThreadBase::TimerExpired(TAny* aPtr)
	{
	// Expiry handler for a thread's wait timeout timer.
	// aPtr is the thread pointer with a cancellation cookie in its low bits:
	// NThreadBase::Release flips the bottom bit of iTimer.iPtr when it
	// cancels a timer that has expired but whose handler has not yet run, so
	// a bit mismatch against the current iTimer.iPtr marks this expiry as
	// spurious.
	TLinAddr cookie = (TLinAddr)aPtr;
	NThread* pT = (NThread*)(cookie &~ 3);	// strip cookie bits to recover the thread pointer
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::TimerExpired %T, state %d",pT,pT->iNState));
	NThreadTimeoutHandler th = pT->iHandlers->iTimeoutHandler;
	NKern::Lock();
	// The higher-level timeout handler is only used for EBlocked or for
	// personality-layer states (iNState>=ENumNStates).
	if (pT->iNState<ENumNStates && pT->iNState!=EBlocked)
		th = NULL;
	if (th)
		{
		// Use higher level timeout handler
		NKern::Unlock();
		(*th)(pT, ETimeoutPreamble);
		TInt param = ETimeoutPostamble;
		NKern::Lock();
		// Re-check the cancellation cookie after regaining the kernel lock.
		TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr;
		if ((cookie ^ current_cookie) & 1)
			{
			// The timer was cancelled just after expiring but before this function
			// managed to call NKern::Lock(), so it's spurious
			param = ETimeoutSpurious;
			}
		else
			pT->iTimer.iUserFlags = FALSE;
		NKern::Unlock();
		(*th)(pT, param);
		return;
		}
	TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr;
	if ((cookie ^ current_cookie) & 1)
		{
		// The timer was cancelled just after expiring but before this function
		// managed to call NKern::Lock(), so just return without doing anything.
		NKern::Unlock();
		return;
		}
	pT->iTimer.iUserFlags = FALSE;
	// Default timeout action: wake the thread with KErrTimedOut, reverting
	// any wait object it never acquired.
	switch(pT->iNState)
		{
		case EDead:
		case EReady:
		case ESuspended:
			// Not waiting on anything - nothing to do.
			NKern::Unlock();
			return;
		case EWaitFastSemaphore:
			// Revert the semaphore wait; the thread never acquired it.
			((NFastSemaphore*)pT->iWaitObj)->WaitCancel();
			break;
		case EBlocked:
		case ESleep:
		case EWaitDfc:
			// Make ready unless the thread is explicitly suspended.
			pT->CheckSuspendThenReady();
			break;
		default:
			// Personality-layer state - delegate.
			pT->UnknownState(ETimeout,0);
			break;
		}
	pT->iWaitObj=NULL;
	pT->iReturnValue=KErrTimedOut;
	NKern::Unlock();
	}
 | 
|  |   1004 | 
 | 
|  |   1005 | 
 | 
|  |   1006 | /** Changes the priority of a nanokernel thread.
 | 
|  |   1007 | 
 | 
|  |   1008 | 	For use by RTOS personality layers.
 | 
|  |   1009 | 	Do not use this function directly on a Symbian OS thread.
 | 
|  |   1010 | 
 | 
|  |   1011 | 	The thread's unknown state handler will be invoked with function EChangePriority
 | 
|  |   1012 | 	and parameter newp if the current NState is not recognised and the new priority
 | 
|  |   1013 | 	is not equal to the original priority.
 | 
|  |   1014 | 	
 | 
|  |   1015 | 	@param	newp  The new nanokernel priority (0 <= newp < KNumPriorities).
 | 
|  |   1016 | 
 | 
|  |   1017 | 	@pre	Kernel must be locked.
 | 
|  |   1018 | 	@pre	Call in a thread context.
 | 
|  |   1019 | 	
 | 
|  |   1020 | 	@post	Kernel is locked.
 | 
|  |   1021 |  */
 | 
EXPORT_C void NThreadBase::SetPriority(TInt newp)
	{
	// Change this nanothread's priority to newp, triggering a reschedule
	// where the change could affect which thread should be running.
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::SetPriority %T %d->%d, state %d",this,iPriority,newp,iNState));
#ifdef _DEBUG
	// When the crazy scheduler is active, refuse to set any priority higher than 1
	if (KCrazySchedulerEnabled() && newp>1)
		newp=1;
#endif
	if (newp==iPriority)
		return;		// no change - avoid a spurious reschedule
#ifdef BTRACE_THREAD_PRIORITY
	BTrace8(BTrace::EThreadPriority,BTrace::ENThreadPriority,this,newp);
#endif
	switch(iNState)
		{
		case EReady:
			{
			// Thread is on the ready list: requeue it at the new priority and
			// decide whether a reschedule is needed.
			TInt oldp=iPriority;
			TheScheduler.ChangePriority(this,newp);
			NThreadBase* pC=TheScheduler.iCurrentThread;
			if (this==pC)
				{
				// Current thread's priority dropped: reschedule if another
				// ready thread could now run instead.
				// NOTE(review): `TheScheduler>newp` relies on a TScheduler
				// comparison operator (presumably highest ready priority vs
				// newp) - confirm against nk_priv.h/nklib.h.
				if (newp<oldp && (TheScheduler>newp || !TPriListLink::Alone()))	// can't have scheduler<newp
					RescheduleNeeded();
				}
			else if (newp>oldp)
				{
				// Another thread's priority was raised: preempt the current
				// thread if it is now strictly lower priority, or round-robin
				// if equal and the current thread's timeslice has expired.
				TInt cp=pC->iPriority;
				if (newp>cp)
					RescheduleNeeded();
				else if (newp==cp && pC->iTime==0)
					{
					if (pC->iHeldFastMutex)
						pC->iHeldFastMutex->iWaiting=1;	// don't round-robin now, wait until fast mutex released
					else
						RescheduleNeeded();
					}
				}
			break;
			}
		case ESuspended:
		case EWaitFastSemaphore:
		case EWaitDfc:
		case ESleep:
		case EBlocked:
		case EDead:
			// Not on the ready list - just record the new priority.
			iPriority=TUint8(newp);
			break;
		default:
			// Personality-layer state - let its handler apply the change.
			UnknownState(EChangePriority,newp);
			break;
		}
	}
 | 
|  |   1076 | 
 | 
void NThreadBase::Exit()
	{
	// The current thread is exiting
	// Enter with kernel locked, don't return
	__NK_ASSERT_DEBUG(this==TheScheduler.iCurrentThread);

	OnExit();

	// Enter a critical section with the special ECSExitInProgress marker so
	// the thread cannot be suspended or killed again while the exit handler
	// runs with preemption enabled.
	TInt threadCS=iCsCount;
	TInt kernCS=TheScheduler.iKernCSLocked;
	iCsCount=1;
	iCsFunction=ECSExitInProgress;
	NKern::Unlock();
	__KTRACE_OPT(KSCHED,DEBUGPRINT("Exit %T %u",this,NTickCount()));
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Exit %T, CSC %d HeldFM %M KernCS %d",this,threadCS,iHeldFastMutex,kernCS));
	// Sanity checks: the kernel must have been locked exactly once, and a
	// thread may not exit while holding a fast mutex or while already inside
	// a critical section.
	if (kernCS!=1)
		FAULT();
	if (iHeldFastMutex)
		FAULT();
	if (threadCS)
		FAULT();
	// Run the exit handler (if any); it may return a DFC to be enqueued once
	// the thread has been safely removed from the scheduler, typically used
	// to clean up the NThread object itself.
	TDfc* pD=NULL;
	NThreadExitHandler xh = iHandlers->iExitHandler;
	if (xh)
		pD=(*xh)((NThread*)this);		// call exit handler
	NKern::Lock();
	if (pD)
		pD->DoEnque();
	iNState=EDead;
	TheScheduler.Remove(this);
	RescheduleNeeded();
#ifdef BTRACE_THREAD_IDENTIFICATION
	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadDestroy,this);
#endif
	__NK_ASSERT_ALWAYS(iCsFunction == ECSExitInProgress);
	TScheduler::Reschedule();	// this won't return
	FAULT();
	}
 | 
|  |   1115 | 
 | 
|  |   1116 | 
 | 
|  |   1117 | /** Kills a nanokernel thread.
 | 
|  |   1118 | 
 | 
|  |   1119 | 	For use by RTOS personality layers.
 | 
|  |   1120 | 	Do not use this function directly on a Symbian OS thread.
 | 
|  |   1121 | 
 | 
|  |   1122 | 	When acting on the calling thread, causes the calling thread to exit.
 | 
|  |   1123 | 
 | 
|  |   1124 | 	When acting on another thread, causes that thread to exit unless it is
 | 
|  |   1125 | 	currently in a critical section. In this case the thread is marked as
 | 
|  |   1126 | 	"exit pending" and will exit as soon as it leaves the critical section.
 | 
|  |   1127 | 
 | 
|  |   1128 | 	In either case the exiting thread first invokes its exit handler (if it
 | 
|  |   1129 | 	exists). The handler runs with preemption enabled and with the thread in a
 | 
|  |   1130 | 	critical section so that it may not be suspended or killed again. The
 | 
|  |   1131 | 	handler may return a pointer to a TDfc, which will be enqueued just before
 | 
|  |   1132 | 	the thread finally terminates (after the kernel has been relocked). This DFC
 | 
|  |   1133 | 	will therefore execute once the NThread has been safely removed from the
 | 
|  |   1134 | 	scheduler and is intended to be used to cleanup the NThread object and any
 | 
|  |   1135 | 	associated personality layer resources.
 | 
|  |   1136 | 	
 | 
|  |   1137 | 	@pre	Kernel must be locked.
 | 
|  |   1138 | 	@pre	Call in a thread context.
 | 
|  |   1139 | 	@pre	If acting on calling thread, calling thread must not be in a
 | 
|  |   1140 | 			critical section; if it is the kernel will fault. Also, the kernel
 | 
|  |   1141 | 			must be locked exactly once (iKernCSLocked = 1).
 | 
|  |   1142 | 	
 | 
|  |   1143 | 	@post	Kernel is locked, if not acting on calling thread.
 | 
|  |   1144 | 	@post	Does not return if it acts on the calling thread.
 | 
|  |   1145 |  */
 | 
EXPORT_C void NThreadBase::Kill()
	{
	// Kill a thread
	// Enter with kernel locked
	// Exit with kernel locked if not current thread, otherwise does not return
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::Kill");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T, state %d CSC %d HeldFM %M",this,iNState,iCsCount,iHeldFastMutex));
	OnKill(); // platform-specific hook
	NThreadBase* pC=TheScheduler.iCurrentThread;
	if (this==pC)
		{
		// Killing self: exit immediately. Exiting twice is a fatal error.
		if (iCsFunction==ECSExitInProgress)
			FAULT();
		Exit();				// this will not return
		}
	// Killing another thread: if it is in a critical section or holds a fast
	// mutex, defer the exit until it leaves/releases.
	if (iCsCount || iHeldFastMutex)
		{
		__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T deferred",this));
		if (iCsFunction<0)
			return;			// thread is already exiting
		iCsFunction=ECSExitPending;		// zap any suspensions pending
		// If only a fast mutex is held, flag it as contended so the deferred
		// exit is processed as soon as the mutex is signalled.
		if (iHeldFastMutex && iCsCount==0)
			iHeldFastMutex->iWaiting=1;
		return;
		}

	// thread is not in critical section
	// make the thread divert to Exit() when it next runs
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill diverting %T",this));
	Release(KErrDied);		// cancel any waits on semaphores etc.
	ForceResume();			// release any suspensions
	iWaitFastMutex=NULL;	// if thread was waiting for a fast mutex it needn't bother
	iCsCount=1;				// stop anyone suspending the thread
	iCsFunction=ECSExitPending;
	ForceExit();			// get thread to call Exit when it is next scheduled
	}
 | 
|  |   1182 | 
 | 
|  |   1183 | 
 | 
|  |   1184 | /** Suspends the execution of a thread.
 | 
|  |   1185 | 
 | 
|  |   1186 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1187 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSuspend().
 | 
|  |   1188 | 
 | 
|  |   1189 |     If the thread is in a critical section or holds a fast mutex, the suspension will
 | 
|  |   1190 |     be deferred until the thread leaves the critical section or signals the fast mutex.
 | 
    Otherwise the thread will be suspended with immediate effect. If the thread is
    currently running, its execution will be suspended and a reschedule will occur.
 | 
|  |   1193 | 
 | 
|  |   1194 |     @param aThread Thread to be suspended.
 | 
|  |   1195 |     @param aCount  Number of times to suspend this thread.
 | 
|  |   1196 |     
 | 
|  |   1197 |     @return TRUE, if the thread had changed the state from non-suspended to suspended;
 | 
|  |   1198 | 	        FALSE, otherwise.
 | 
|  |   1199 | 	     
 | 
|  |   1200 | 	@see Kern::ThreadSuspend()
 | 
|  |   1201 | */
 | 
|  |   1202 | EXPORT_C TBool NKern::ThreadSuspend(NThread* aThread, TInt aCount)
 | 
|  |   1203 | 	{	
 | 
|  |   1204 | 	NKern::Lock();
 | 
|  |   1205 | 	TBool r=aThread->Suspend(aCount);
 | 
|  |   1206 | 	NKern::Unlock();
 | 
|  |   1207 | 	return r;
 | 
|  |   1208 | 	}
 | 
|  |   1209 | 
 | 
|  |   1210 | 
 | 
|  |   1211 | /** Resumes the execution of a thread.
 | 
|  |   1212 | 
 | 
|  |   1213 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1214 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
 | 
|  |   1215 | 
 | 
|  |   1216 |     This function resumes the thread once. If the thread was suspended more than once
 | 
|  |   1217 |     the thread will remain suspended.
 | 
|  |   1218 |     If the thread is in a critical section, this function will decrease the number of
 | 
|  |   1219 |     deferred suspensions.
 | 
|  |   1220 | 
 | 
|  |   1221 |     @param aThread Thread to be resumed.
 | 
|  |   1222 |     
 | 
|  |   1223 |     @return TRUE, if the thread had changed the state from suspended to non-suspended;
 | 
|  |   1224 |             FALSE, otherwise.
 | 
|  |   1225 |             
 | 
|  |   1226 | 	@see Kern::ThreadResume()
 | 
|  |   1227 | */
 | 
|  |   1228 | EXPORT_C TBool NKern::ThreadResume(NThread* aThread)
 | 
|  |   1229 | 	{	
 | 
|  |   1230 | 	NKern::Lock();
 | 
|  |   1231 | 	TBool r=aThread->Resume();
 | 
|  |   1232 | 	NKern::Unlock();
 | 
|  |   1233 | 	return r;
 | 
|  |   1234 | 	}
 | 
|  |   1235 | 
 | 
|  |   1236 | 
 | 
|  |   1237 | /** Resumes the execution of a thread and signals a mutex.
 | 
|  |   1238 | 
 | 
|  |   1239 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1240 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
 | 
|  |   1241 | 
 | 
|  |   1242 |     This function resumes the thread once. If the thread was suspended more than once
 | 
|  |   1243 |     the thread will remain suspended.
 | 
|  |   1244 |     If the thread is in a critical section, this function will decrease the number of
 | 
|  |   1245 |     deferred suspensions.
 | 
|  |   1246 | 
 | 
|  |   1247 |     @param aThread Thread to be resumed.
 | 
|  |   1248 |     @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
 | 
|  |   1249 | 
 | 
|  |   1250 |     @return TRUE, if the thread had changed the state from suspended to non-suspended;
 | 
|  |   1251 |             FALSE, otherwise.
 | 
|  |   1252 |            
 | 
|  |   1253 | 	@see Kern::ThreadResume()
 | 
|  |   1254 | */
 | 
|  |   1255 | EXPORT_C TBool NKern::ThreadResume(NThread* aThread, NFastMutex* aMutex)
 | 
|  |   1256 | 	{
 | 
|  |   1257 | 	if (!aMutex)
 | 
|  |   1258 | 		aMutex=&TheScheduler.iLock;
 | 
|  |   1259 | 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadResume %T + FM %M",aThread,aMutex));
 | 
|  |   1260 | 	NKern::Lock();
 | 
|  |   1261 | 	TBool r=aThread->Resume();
 | 
|  |   1262 | 	aMutex->Signal();
 | 
|  |   1263 | 	NKern::Unlock();
 | 
|  |   1264 | 	return r;
 | 
|  |   1265 | 	}
 | 
|  |   1266 | 
 | 
|  |   1267 | 
 | 
|  |   1268 | /** Forces the execution of a thread to be resumed.
 | 
|  |   1269 | 
 | 
|  |   1270 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1271 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
 | 
|  |   1272 | 
 | 
|  |   1273 |     This function cancels all suspensions on a thread.
 | 
|  |   1274 | 
 | 
|  |   1275 |     @param aThread Thread to be resumed.
 | 
|  |   1276 |     
 | 
|  |   1277 |     @return TRUE, if the thread had changed the state from suspended to non-suspended;
 | 
|  |   1278 |             FALSE, otherwise.
 | 
|  |   1279 |             
 | 
|  |   1280 | 	@see Kern::ThreadResume()
 | 
|  |   1281 | */
 | 
|  |   1282 | EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread)
 | 
|  |   1283 | 	{	
 | 
|  |   1284 | 	NKern::Lock();
 | 
|  |   1285 | 	TBool r=aThread->ForceResume();
 | 
|  |   1286 | 	NKern::Unlock();
 | 
|  |   1287 | 	return r;
 | 
|  |   1288 | 	}
 | 
|  |   1289 | 
 | 
|  |   1290 | 
 | 
|  |   1291 | /** Forces the execution of a thread to be resumed and signals a mutex.
 | 
|  |   1292 | 
 | 
|  |   1293 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1294 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
 | 
|  |   1295 | 
 | 
|  |   1296 |     This function cancels all suspensions on a thread.
 | 
|  |   1297 | 
 | 
|  |   1298 |     @param aThread Thread to be resumed.
 | 
|  |   1299 |     @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
 | 
|  |   1300 |     
 | 
|  |   1301 |     @return TRUE, if the thread had changed the state from suspended to non-suspended;
 | 
|  |   1302 |             FALSE, otherwise.
 | 
|  |   1303 |             
 | 
|  |   1304 |     @see Kern::ThreadResume()
 | 
|  |   1305 | */
 | 
|  |   1306 | EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread, NFastMutex* aMutex)
 | 
|  |   1307 | 	{
 | 
|  |   1308 | 	if (!aMutex)
 | 
|  |   1309 | 		aMutex=&TheScheduler.iLock;
 | 
|  |   1310 | 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadForceResume %T + FM %M",aThread,aMutex));
 | 
|  |   1311 | 	NKern::Lock();
 | 
|  |   1312 | 	TBool r=aThread->ForceResume();
 | 
|  |   1313 | 	aMutex->Signal();
 | 
|  |   1314 | 	NKern::Unlock();
 | 
|  |   1315 | 	return r;
 | 
|  |   1316 | 	}
 | 
|  |   1317 | 
 | 
|  |   1318 | 
 | 
|  |   1319 | /** Awakens a nanothread.
 | 
|  |   1320 | 
 | 
|  |   1321 | 	This function is used to implement synchronisation primitives in the EPOC
 | 
|  |   1322 | 	kernel (e.g. DMutex and DSemaphore) and in personality layers.  It is not
 | 
|  |   1323 | 	intended to be used directly by device drivers.
 | 
|  |   1324 | 
 | 
|  |   1325 | 	If the nanothread is waiting on a fast semaphore, waiting for a DFC, or is
 | 
|  |   1326 | 	blocked in a call to NKern::Block, it is awakened and put back on the ready
 | 
|  |   1327 | 	list.  Otherwise, the thread state is unchanged.  In particular, nothing
 | 
|  |   1328 | 	happens if the nanothread has been explicitly suspended.
 | 
|  |   1329 | 
 | 
|  |   1330 | 	@param aThread Thread to release.
 | 
|  |   1331 | 	@param aReturnValue Value returned by NKern::Block if the thread was blocked.
 | 
|  |   1332 | 
 | 
|  |   1333 | 	@see NKern::Block()
 | 
|  |   1334 | 
 | 
|  |   1335 | 	@pre Interrupts must be enabled.
 | 
|  |   1336 | 	@pre Do not call from an ISR
 | 
|  |   1337 |  */
 | 
EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRelease(NThread*, TInt)");
	// Perform the release under the kernel lock; any reschedule it requests
	// takes effect when the lock is released.
	NKern::Lock();
	aThread->Release(aReturnValue);
	NKern::Unlock();
	}
 | 
|  |   1345 | 
 | 
|  |   1346 | 
 | 
|  |   1347 | /** Atomically awakens a nanothread and signals a fast mutex.
 | 
|  |   1348 | 
 | 
|  |   1349 | 	This function is used to implement synchronisation primitives in the EPOC
 | 
|  |   1350 | 	kernel (e.g. DMutex and DSemaphore) and in personality layers.  It is not
 | 
|  |   1351 | 	intended to be used directly by device drivers.
 | 
|  |   1352 | 
 | 
|  |   1353 | 	@param aThread Thread to release.
 | 
|  |   1354 | 	@param aReturnValue Value returned by NKern::Block if the thread was blocked.
 | 
|  |   1355 | 	@param aMutex Fast mutex to signal. If NULL, the system lock is signalled.
 | 
|  |   1356 | 
 | 
|  |   1357 | 	@see NKern::ThreadRelease(NThread*, TInt)
 | 
|  |   1358 | 	@see NKern::Block()
 | 
|  |   1359 | 
 | 
|  |   1360 | 	@pre	Call in a thread context.
 | 
|  |   1361 | 	@pre	Interrupts must be enabled.
 | 
|  |   1362 | 	@pre	Kernel must be unlocked.
 | 
|  |   1363 | 	@pre	Specified mutex must be held
 | 
|  |   1364 |  */
 | 
EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRelease(NThread*,TInt,NFastMutex*)");
	// A NULL mutex denotes the system lock (the scheduler's fast mutex).
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadRelease %T ret %d + FM %M",aThread,aReturnValue,aMutex));
	// Release and signal as a unit with respect to rescheduling: both happen
	// under the kernel lock, so no other thread runs in between.
	NKern::Lock();
	aThread->Release(aReturnValue);
	aMutex->Signal();
	NKern::Unlock();
	}
 | 
|  |   1376 | 
 | 
|  |   1377 | 
 | 
|  |   1378 | /** Changes the priority of a thread.
 | 
|  |   1379 | 
 | 
|  |   1380 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1381 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
 | 
|  |   1382 | 
 | 
|  |   1383 |     @param aThread Thread to receive the new priority.
 | 
|  |   1384 |     @param aPriority New priority for aThread.
 | 
|  |   1385 |     
 | 
|  |   1386 | 	@see Kern::SetThreadPriority()
 | 
|  |   1387 | */
 | 
EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority)
	{
	// Lock the kernel so the priority change (and any ready-list update done
	// inside SetPriority) is atomic with respect to rescheduling.
	NKern::Lock();
	aThread->SetPriority(aPriority);
	NKern::Unlock();
	}
 | 
|  |   1394 | 
 | 
|  |   1395 | 
 | 
|  |   1396 | /** Changes the priority of a thread and signals a mutex.
 | 
|  |   1397 | 
 | 
|  |   1398 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1399 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
 | 
|  |   1400 | 
 | 
|  |   1401 |     @param aThread Thread to receive the new priority.
 | 
|  |   1402 |     @param aPriority New priority for aThread.
 | 
    @param aMutex Fast mutex to be signalled. If NULL, the system lock will be signalled.
 | 
|  |   1404 |         
 | 
|  |   1405 | 	@see Kern::SetThreadPriority()
 | 
|  |   1406 | */
 | 
EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex)
	{
	// A NULL mutex means the system lock
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadSetPriority %T->%d + FM %M",aThread,aPriority,aMutex));
	// Lock the kernel so the priority change and the mutex signal are atomic
	// with respect to rescheduling.
	NKern::Lock();
	aThread->SetPriority(aPriority);
	aMutex->Signal();
	NKern::Unlock();
	}
 | 
|  |   1417 | 
 | 
|  |   1418 | #ifndef __SCHEDULER_MACHINE_CODED__
 | 
|  |   1419 | 
 | 
|  |   1420 | /** Signals the request semaphore of a nanothread.
 | 
|  |   1421 | 
 | 
|  |   1422 | 	This function is intended to be used by the EPOC layer and personality
 | 
|  |   1423 | 	layers.  Device drivers should use Kern::RequestComplete instead.
 | 
|  |   1424 | 
 | 
|  |   1425 | 	@param aThread Nanothread to signal. Must be non NULL.
 | 
|  |   1426 | 
 | 
|  |   1427 | 	@see Kern::RequestComplete()
 | 
|  |   1428 | 
 | 
|  |   1429 | 	@pre Interrupts must be enabled.
 | 
|  |   1430 | 	@pre Do not call from an ISR
 | 
|  |   1431 |  */
 | 
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRequestSignal(NThread*)");
	// Signal the per-thread request semaphore with the kernel locked so the
	// wake-up is atomic with respect to rescheduling.
	NKern::Lock();
	aThread->iRequestSemaphore.Signal();
	NKern::Unlock();
	}
 | 
|  |   1439 | 
 | 
|  |   1440 | 
 | 
|  |   1441 | /** Atomically signals the request semaphore of a nanothread and a fast mutex.
 | 
|  |   1442 | 
 | 
|  |   1443 | 	This function is intended to be used by the EPOC layer and personality
 | 
|  |   1444 | 	layers.  Device drivers should use Kern::RequestComplete instead.
 | 
|  |   1445 | 
 | 
|  |   1446 | 	@param aThread Nanothread to signal.  Must be non NULL.
 | 
|  |   1447 | 	@param aMutex Fast mutex to signal.  If NULL, the system lock is signaled.
 | 
|  |   1448 | 
 | 
|  |   1449 | 	@see Kern::RequestComplete()
 | 
|  |   1450 | 
 | 
|  |   1451 | 	@pre	Call in a thread context.
 | 
|  |   1452 | 	@pre	Interrupts must be enabled.
 | 
|  |   1453 | 	@pre	Kernel must be unlocked.
 | 
|  |   1454 | 	@pre	Specified mutex must be held
 | 
|  |   1455 |  */
 | 
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRequestSignal(NThread*,NFastMutex*)");
	// A NULL mutex means the system lock
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	// Lock the kernel so the semaphore signal and the mutex signal form one
	// atomic operation with respect to rescheduling.
	NKern::Lock();
	aThread->iRequestSemaphore.Signal();
	aMutex->Signal();
	NKern::Unlock();
	}
 | 
|  |   1466 | #endif
 | 
|  |   1467 | 
 | 
|  |   1468 | 
 | 
|  |   1469 | /** Signals the request semaphore of a nanothread several times.
 | 
|  |   1470 | 
 | 
|  |   1471 | 	This function is intended to be used by the EPOC layer and personality
 | 
|  |   1472 | 	layers.  Device drivers should use Kern::RequestComplete instead.
 | 
|  |   1473 | 
 | 
|  |   1474 | 	@param aThread Nanothread to signal.  If NULL, the current thread is signaled.
 | 
|  |   1475 | 	@param aCount Number of times the request semaphore must be signaled.
 | 
|  |   1476 | 	
 | 
|  |   1477 | 	@pre aCount >= 0
 | 
|  |   1478 | 
 | 
|  |   1479 | 	@see Kern::RequestComplete()
 | 
|  |   1480 |  */
 | 
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, TInt aCount)
	{
	__ASSERT_WITH_MESSAGE_DEBUG(aCount >= 0,"aCount >= 0","NKern::ThreadRequestSignal");
	// A NULL thread means signal the current thread
	if (!aThread)
		aThread=(NThread*)TheScheduler.iCurrentThread;
	// Signal the request semaphore aCount times in one locked operation
	NKern::Lock();
	aThread->iRequestSemaphore.SignalN(aCount);
	NKern::Unlock();
	}
 | 
|  |   1490 | 
 | 
|  |   1491 | 
 | 
|  |   1492 | /**	Kills a nanothread.
 | 
|  |   1493 | 
 | 
|  |   1494 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1495 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
 | 
|  |   1496 | 
 | 
|  |   1497 | 	This function does not return if the current thread is killed.  
 | 
|  |   1498 | 	This function is asynchronous (i.e. the thread to kill may still be alive when the call returns).
 | 
|  |   1499 | 
 | 
|  |   1500 | 	@param aThread Thread to kill.  Must be non NULL.
 | 
|  |   1501 | 
 | 
|  |   1502 | 	@pre If acting on calling thread, calling thread must not be in a
 | 
|  |   1503 | 			critical section
 | 
|  |   1504 | 	@pre Thread must not already be exiting.
 | 
|  |   1505 | 
 | 
|  |   1506 | 	@see Kern::ThreadKill()
 | 
|  |   1507 |  */
 | 
EXPORT_C void NKern::ThreadKill(NThread* aThread)
	{
	// Kill with the kernel locked; per the API contract above, this does not
	// return if aThread is the calling thread.
	NKern::Lock();
	aThread->Kill();
	NKern::Unlock();
	}
 | 
|  |   1514 | 
 | 
|  |   1515 | 
 | 
|  |   1516 | /**	Atomically kills a nanothread and signals a fast mutex.
 | 
|  |   1517 | 
 | 
|  |   1518 | 	This function is intended to be used by the EPOC layer and personality layers.
 | 
|  |   1519 | 	Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
 | 
|  |   1520 | 
 | 
|  |   1521 | 	@param aThread Thread to kill.  Must be non NULL.
 | 
|  |   1522 | 	@param aMutex Fast mutex to signal.  If NULL, the system lock is signalled.
 | 
|  |   1523 | 
 | 
|  |   1524 | 	@pre	If acting on calling thread, calling thread must not be in a
 | 
|  |   1525 | 			critical section
 | 
|  |   1526 | 	@pre Thread must not already be exiting.
 | 
|  |   1527 | 
 | 
|  |   1528 | 	@see NKern::ThreadKill(NThread*)
 | 
|  |   1529 |  */
 | 
EXPORT_C void NKern::ThreadKill(NThread* aThread, NFastMutex* aMutex)
	{
	// A NULL mutex means the system lock
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	if (aThread==pC)
		{
		// Suicide case: we still hold aMutex so we cannot simply Kill().
		// Instead mark an exit as pending and force the mutex signal path
		// to act on it - the signal below performs the exit.
		__NK_ASSERT_DEBUG(pC->iCsCount==0);	// Make sure thread isn't in critical section
		aThread->iCsFunction=NThreadBase::ECSExitPending;
		aMutex->iWaiting=1;
		aMutex->Signal();	// this will make us exit
		FAULT();			// should never get here
		}
	else
		{
		// Killing another thread: kill first, then release the mutex
		aThread->Kill();
		aMutex->Signal();
		}
	NKern::Unlock();
	}
 | 
|  |   1551 | 
 | 
|  |   1552 | 
 | 
|  |   1553 | /** Enters thread critical section.
 | 
|  |   1554 | 
 | 
|  |   1555 | 	This function can safely be used in device drivers.
 | 
|  |   1556 | 
 | 
|  |   1557 |     The current thread will enter its critical section. While in critical section
 | 
|  |   1558 |     the thread cannot be suspended or killed. Any suspension or kill will be deferred
 | 
|  |   1559 |     until the thread leaves the critical section.
 | 
|  |   1560 |     Some API explicitly require threads to be in critical section before calling that
 | 
|  |   1561 |     API.
 | 
|  |   1562 |     Only User threads need to call this function as the concept of thread critical
 | 
|  |   1563 |     section applies to User threads only.
 | 
|  |   1564 | 
 | 
|  |   1565 | 	@pre	Call in a thread context.
 | 
|  |   1566 | 	@pre	Kernel must be unlocked.
 | 
|  |   1567 | */
 | 
EXPORT_C void NKern::ThreadEnterCS()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadEnterCS");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadEnterCS %T",pC));
	__NK_ASSERT_DEBUG(pC->iCsCount>=0);
	// Critical sections nest: entering is just an increment of the count
	++pC->iCsCount;
	}
 | 
|  |   1576 | 
 | 
|  |   1577 | 
 | 
// Internal variant of NKern::ThreadEnterCS() without precondition checks or
// tracing; also returns the current thread for the caller's convenience.
NThread* NKern::_ThreadEnterCS()
	{
	NThread* pC = (NThread*)TheScheduler.iCurrentThread;
	__NK_ASSERT_DEBUG(pC->iCsCount>=0);
	++pC->iCsCount;
	return pC;
	}
 | 
|  |   1585 | 
 | 
|  |   1586 | 
 | 
|  |   1587 | /** Leaves thread critical section.
 | 
|  |   1588 | 
 | 
|  |   1589 | 	This function can safely be used in device drivers.
 | 
|  |   1590 | 
 | 
|  |   1591 |     The current thread will leave its critical section. If the thread was suspended/killed
 | 
|  |   1592 |     while in critical section, the thread will be suspended/killed after leaving the
 | 
|  |   1593 |     critical section by calling this function.
 | 
|  |   1594 |     Only User threads need to call this function as the concept of thread critical
 | 
|  |   1595 |     section applies to User threads only.
 | 
|  |   1596 | 
 | 
|  |   1597 | 	@pre	Call in a thread context.
 | 
|  |   1598 | 	@pre	Kernel must be unlocked.
 | 
|  |   1599 | */
 | 
|  |   1600 | 
 | 
EXPORT_C void NKern::ThreadLeaveCS()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadLeaveCS");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadLeaveCS %T",pC));
	__NK_ASSERT_DEBUG(pC->iCsCount>0);
	// If this was the outermost critical section and a suspend/kill was
	// deferred while inside it, act on it now.
	if (--pC->iCsCount==0 && pC->iCsFunction!=0)
		{
		if (pC->iHeldFastMutex)
			pC->iHeldFastMutex->iWaiting=1;	// defer further: handled when the fast mutex is signalled
		else
			pC->DoCsFunction();
		}
	NKern::Unlock();
	}
 | 
|  |   1617 | 
 | 
// Internal variant of NKern::ThreadLeaveCS() without precondition checks or
// tracing. Logic is identical: run (or defer) any pending CS function when
// the outermost critical section is left.
void NKern::_ThreadLeaveCS()
	{
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	__NK_ASSERT_DEBUG(pC->iCsCount>0);
	if (--pC->iCsCount==0 && pC->iCsFunction!=0)
		{
		if (pC->iHeldFastMutex)
			pC->iHeldFastMutex->iWaiting=1;	// defer further: handled when the fast mutex is signalled
		else
			pC->DoCsFunction();
		}
	NKern::Unlock();
	}
 | 
|  |   1632 | 
 | 
|  |   1633 | /** Freeze the CPU of the current thread
 | 
|  |   1634 | 
 | 
|  |   1635 | 	After this the current thread will not migrate to another processor
 | 
|  |   1636 | 
 | 
|  |   1637 | 	On uniprocessor builds does nothing and returns 0
 | 
|  |   1638 | 
 | 
|  |   1639 | 	@return	A cookie to be passed to NKern::EndFreezeCpu() to allow nesting
 | 
|  |   1640 | */
 | 
EXPORT_C TInt NKern::FreezeCpu()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::FreezeCpu");
	// Uniprocessor build: threads cannot migrate, so only the calling
	// context is validated; the cookie is always 0.
	return 0;
	}
 | 
|  |   1646 | 
 | 
|  |   1647 | 
 | 
|  |   1648 | /** Unfreeze the current thread's CPU
 | 
|  |   1649 | 
 | 
|  |   1650 | 	After this the current thread will again be eligible to migrate to another processor
 | 
|  |   1651 | 
 | 
|  |   1652 | 	On uniprocessor builds does nothing
 | 
|  |   1653 | 
 | 
|  |   1654 | 	@param	aCookie the value returned by NKern::FreezeCpu()
 | 
|  |   1655 | */
 | 
EXPORT_C void NKern::EndFreezeCpu(TInt /*aCookie*/)
	{
	// Uniprocessor build: nothing to undo, only validate the calling context
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::EndFreezeCpu");
	}
 | 
|  |   1660 | 
 | 
|  |   1661 | 
 | 
|  |   1662 | /** Change the CPU affinity of a thread
 | 
|  |   1663 | 
 | 
|  |   1664 | 	On uniprocessor builds does nothing
 | 
|  |   1665 | 
 | 
|  |   1666 | 	@pre	Call in a thread context.
 | 
|  |   1667 | 
 | 
|  |   1668 | 	@param	The new CPU affinity mask
 | 
|  |   1669 | 	@return The old affinity mask
 | 
|  |   1670 |  */
 | 
EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread*, TUint32)
	{
	// Uniprocessor build: every thread is implicitly bound to processor 0
	return 0;	// lock to processor 0
	}
 | 
|  |   1675 | 
 | 
|  |   1676 | 
 | 
|  |   1677 | /** Modify a thread's timeslice
 | 
|  |   1678 | 
 | 
|  |   1679 | 	@pre	Call in a thread context.
 | 
|  |   1680 | 
 | 
	@param	aThread		The thread whose timeslice is to be modified
	@param	aTimeslice	The new timeslice value
 | 
|  |   1682 |  */
 | 
EXPORT_C void NKern::ThreadSetTimeslice(NThread* aThread, TInt aTimeslice)
	{
	NKern::Lock();
	// Also reset the remaining time (iTime) if the thread currently has a
	// full, untouched timeslice, or if timeslicing is being disabled
	// (negative aTimeslice); otherwise let the current slice run down.
	if (aThread->iTimeslice == aThread->iTime || aTimeslice<0)
		aThread->iTime = aTimeslice;
	aThread->iTimeslice = aTimeslice;
	NKern::Unlock();
	}
 | 
|  |   1691 | 
 | 
|  |   1692 | 
 | 
|  |   1693 | /** Blocks current nanothread.
 | 
|  |   1694 | 
 | 
|  |   1695 | 	This function is used to implement synchronisation primitives in the EPOC
 | 
|  |   1696 | 	layer and in personality layers.  It is not intended to be used directly by
 | 
|  |   1697 | 	device drivers.  
 | 
|  |   1698 | 
 | 
|  |   1699 | 	@param aTimeout If greater than 0, the nanothread will be blocked for at most
 | 
|  |   1700 | 					aTimeout microseconds.
 | 
|  |   1701 | 	@param aMode	Bitmask whose possible values are documented in TBlockMode.  
 | 
|  |   1702 | 	@param aMutex	Fast mutex to operate on.  If NULL, the system lock is used.
 | 
|  |   1703 | 
 | 
|  |   1704 | 	@see NKern::ThreadRelease()
 | 
|  |   1705 | 	@see TBlockMode
 | 
|  |   1706 | 
 | 
|  |   1707 | 	@pre	Call in a thread context.
 | 
|  |   1708 | 	@pre	Interrupts must be enabled.
 | 
|  |   1709 | 	@pre	Kernel must be unlocked.
 | 
|  |   1710 | 	@pre	Specified mutex must be held
 | 
|  |   1711 |  */
 | 
EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Block(TUint32,TUint,NFastMutex*)");
	// A NULL mutex means the system lock
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d FM %M",aTimeout,aMode,aMutex));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	pC->iReturnValue=0;	// default result if released without an explicit value
	NKern::Lock();
	if (aMode & EEnterCS)
		++pC->iCsCount;	// enter a critical section atomically with blocking
	if (aMode & ERelease)
		{
		// Release the fast mutex by hand - the kernel is already locked
#ifdef BTRACE_FAST_MUTEX
		BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,aMutex);
#endif
		aMutex->iHoldingThread=NULL;
		TBool w=aMutex->iWaiting;
		aMutex->iWaiting=0;
		pC->iHeldFastMutex=NULL;
		// If a CS function was deferred while we held the mutex and we are
		// not in a critical section, run it before blocking.
		if (w && !pC->iCsCount && pC->iCsFunction)
			pC->DoCsFunction();
		}
	RescheduleNeeded();
	if (aTimeout)
		{
		// Arm the wait timer; its handler runs if we are still blocked when
		// the timeout expires.
		pC->iTimer.iUserFlags = TRUE;
		pC->iTimer.OneShot(aTimeout,TRUE);
		}
	// Take ourselves off the ready list and mark blocked; the actual
	// reschedule happens when the kernel is unlocked below.
	if (pC->iNState==NThread::EReady)
		TheScheduler.Remove(pC);
	pC->iNState=NThread::EBlocked;
	NKern::Unlock();
	// Execution resumes here once the thread is released
	if (aMode & EClaim)
		FMWait(aMutex);	// re-acquire the mutex if the caller asked for it
	return pC->iReturnValue;
	}
 | 
|  |   1749 | 
 | 
|  |   1750 | /**
 | 
|  |   1751 | @pre	Call in a thread context.
 | 
|  |   1752 | @pre	Interrupts must be enabled.
 | 
|  |   1753 | @pre	Kernel must be unlocked.
 | 
|  |   1754 | @pre	No fast mutex can be held
 | 
|  |   1755 | */
 | 
|  |   1756 | /** @see NKern::Block(TUint32, TUint, NFastMutex*) */
 | 
EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Block(TUint32,TUint)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d",aTimeout,aMode));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	pC->iReturnValue=0;	// default result if released without an explicit value
	NKern::Lock();
	if (aMode & EEnterCS)
		++pC->iCsCount;	// enter a critical section atomically with blocking
	RescheduleNeeded();
	if (aTimeout)
		{
		// Arm the wait timer; its handler runs if we are still blocked when
		// the timeout expires.
		pC->iTimer.iUserFlags = TRUE;
		pC->iTimer.OneShot(aTimeout,TRUE);
		}
	// Mark blocked and leave the ready list; the reschedule happens on
	// NKern::Unlock() and execution resumes here once released.
	pC->iNState=NThread::EBlocked;
	TheScheduler.Remove(pC);
	NKern::Unlock();
	return pC->iReturnValue;
	}
 | 
|  |   1777 | 
 | 
|  |   1778 | 
 | 
|  |   1779 | 
 | 
|  |   1780 | 
 | 
EXPORT_C void NKern::NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj)
/**
Places the current nanothread into a wait state on an externally
defined wait object.
	
For use by RTOS personality layers.
Do not use this function directly on a Symbian OS thread.

Since the kernel is locked on entry, any reschedule will be deferred until
it is unlocked. The thread should be added to any necessary wait queue after
a call to this function, since this function removes it from the ready list.
The thread's wait timer is started if aTimeout is nonzero.
The thread's NState and wait object are updated.

Call NThreadBase::Release() when the wait condition is resolved.

@param aTimeout The maximum time for which the thread should block, in nanokernel timer ticks.
                A zero value means wait forever.
                If the thread is still blocked when the timeout expires,
                then the timeout state handler will be called.
@param aState   The nanokernel thread state (N-State) value to be set.
                This state corresponds to the externally defined wait object.
                This value will be written into the member NThreadBase::iNState.
@param aWaitObj A pointer to an externally defined wait object.
                This value will be written into the member NThreadBase::iWaitObj.

@pre	Kernel must be locked.
@pre	Call in a thread context.

@post	Kernel is locked.

@see	NThreadBase::Release()
*/
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::NanoBlock");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::NanoBlock time %d state %d obj %08x", aTimeout, aState, aWaitObj));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	if (aTimeout)
		{
		// Arm the wait timer (zero aTimeout means wait forever - no timer)
		pC->iTimer.iUserFlags = TRUE;
		pC->iTimer.OneShot(aTimeout,TRUE);
		}
	// Record the externally defined wait state and wait object
	pC->iNState = (TUint8)aState;
	pC->iWaitObj = aWaitObj;
	pC->iReturnValue = 0;
	// Off the ready list; the reschedule is deferred until the caller
	// unlocks the kernel (see function description above).
	TheScheduler.Remove(pC);
	RescheduleNeeded();
	}
 | 
|  |   1829 | 
 | 
|  |   1830 | 
 | 
|  |   1831 | 
 | 
|  |   1832 | 
 | 
EXPORT_C void NKern::Sleep(TUint32 aTime)
/**
Puts the current nanothread to sleep for the specified duration.

It can be called from Symbian OS threads.

@param	aTime sleep time in nanokernel timer ticks.

@pre    No fast mutex can be held.
@pre    Kernel must be unlocked.
@pre	Call in a thread context.
@pre	Interrupts must be enabled.
*/
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Sleep");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Sleep %d",aTime));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	// Arm the wake-up timer, mark the thread sleeping and take it off the
	// ready list; the reschedule occurs on NKern::Unlock().
	pC->iTimer.iUserFlags = TRUE;
	pC->iTimer.OneShot(aTime,TRUE);
	pC->iNState=NThread::ESleep;
	TheScheduler.Remove(pC);
	RescheduleNeeded();
	NKern::Unlock();
	}
 | 
|  |   1858 | 
 | 
|  |   1859 | 
 | 
|  |   1860 | /**	Terminates the current nanothread.
 | 
|  |   1861 | 
 | 
|  |   1862 | 	Calls to this function never return.
 | 
|  |   1863 | 
 | 
|  |   1864 | 	For use by RTOS personality layers.
 | 
|  |   1865 | 	Do not use this function directly on a Symbian OS thread.
 | 
|  |   1866 | 
 | 
|  |   1867 | 	@pre	Call in a thread context.
 | 
|  |   1868 | 	@pre	Interrupts must be enabled.
 | 
|  |   1869 | 	@pre	Kernel must be unlocked.	
 | 
|  |   1870 |  */
 | 
EXPORT_C void NKern::Exit()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Exit");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Exit"));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	pC->Exit();			// this won't return
	FAULT();			// reaching here indicates a kernel fault
	}
 | 
|  |   1880 | 
 | 
|  |   1881 | 
 | 
|  |   1882 | /**	Terminates the current nanothread at the next possible point.
 | 
|  |   1883 | 
 | 
|  |   1884 | 	If the calling thread is not currently in a critical section and does not
 | 
|  |   1885 | 	currently hold a fast mutex, it exits immediately and this function does
 | 
|  |   1886 | 	not return. On the other hand if the thread is in a critical section or
 | 
|  |   1887 | 	holds a fast mutex the thread continues executing but it will exit as soon
 | 
|  |   1888 | 	as it leaves the critical section and/or releases the fast mutex.
 | 
|  |   1889 | 
 | 
|  |   1890 | 	@pre	Call in a thread context.
 | 
|  |   1891 | 	@pre	Interrupts must be enabled.
 | 
|  |   1892 | 	@pre	Kernel must be unlocked.	
 | 
|  |   1893 |  */
 | 
EXPORT_C void NKern::DeferredExit()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::DeferredExit");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NDefExit"));
	NFastMutex* m = HeldFastMutex();
	NThreadBase* pC = NKern::LockC();
	// Not in a critical section and no fast mutex held: exit right away
	if (!m && !pC->iCsCount)
		pC->Exit();			// this won't return
	// Otherwise record the pending exit...
	if (pC->iCsFunction >= 0)	// don't touch it if we are already exiting
		pC->iCsFunction = NThreadBase::ECSExitPending;
	// ...and if only a held fast mutex stands in the way, make its release
	// path act on the pending exit.
	if (m && !pC->iCsCount)
		m->iWaiting = TRUE;
	NKern::Unlock();
	}
 | 
|  |   1908 | 
 | 
|  |   1909 | 
 | 
|  |   1910 | /** Prematurely terminates the current thread's timeslice
 | 
|  |   1911 | 
 | 
|  |   1912 | 	@pre	Kernel must be unlocked.
 | 
|  |   1913 | 	@pre	Call in a thread context.
 | 
|  |   1914 | 	
 | 
|  |   1915 | 	@post	Kernel is unlocked.
 | 
|  |   1916 |  */
 | 
EXPORT_C void NKern::YieldTimeslice()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::YieldTimeslice");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::YieldTimeslice"));
	NThreadBase* t = NKern::LockC();
	t->iTime = 0;	// no time remaining in the current timeslice
	// Only force a reschedule if another thread is queued at this priority
	// (iNext == t means this thread is alone on its list)
	if (t->iNext != t)
		RescheduleNeeded();
	NKern::Unlock();
	}
 | 
|  |   1927 | 
 | 
|  |   1928 | 
 | 
|  |   1929 | /** Rotates the ready list for threads at the specified priority.
 | 
|  |   1930 | 	
 | 
|  |   1931 | 	For use by RTOS personality layers to allow external control of round-robin
 | 
|  |   1932 | 	scheduling. Not intended for direct use by device drivers.
 | 
|  |   1933 | 
 | 
|  |   1934 | 	@param	aPriority = priority at which threads should be rotated.
 | 
|  |   1935 | 						-1 means use calling thread's priority.
 | 
|  |   1936 | 	
 | 
|  |   1937 | 	@pre	Kernel must be unlocked.
 | 
|  |   1938 | 	@pre	Call in a thread context.
 | 
|  |   1939 | 	
 | 
|  |   1940 | 	@post	Kernel is unlocked.
 | 
|  |   1941 |  */
 | 
EXPORT_C void NKern::RotateReadyList(TInt aPriority)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::RotateReadyList");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::RotateReadyList %d",aPriority));
	// Any out-of-range value (including the documented -1) means use the
	// calling thread's priority
	if (aPriority<0 || aPriority>=KNumPriorities)
		aPriority=TheScheduler.iCurrentThread->iPriority;
	NKern::Lock();
	TheScheduler.RotateReadyList(aPriority);
	NKern::Unlock();
	}
 | 
|  |   1952 | 
 | 
|  |   1953 | /** Rotates the ready list for threads at the specified priority.
 | 
|  |   1954 | 	
 | 
|  |   1955 | 	For use by RTOS personality layers to allow external control of round-robin
 | 
|  |   1956 | 	scheduling. Not intended for direct use by device drivers.
 | 
|  |   1957 | 
 | 
|  |   1958 | 	@param	aPriority = priority at which threads should be rotated.
 | 
|  |   1959 | 						-1 means use calling thread's priority.
 | 
|  |   1960 | 	@param	aCpu = which CPU's ready list should be rotated
 | 
|  |   1961 | 					ignored on UP systems.
 | 
|  |   1962 | 	
 | 
|  |   1963 | 	@pre	Kernel must be unlocked.
 | 
|  |   1964 | 	@pre	Call in a thread context.
 | 
|  |   1965 | 	
 | 
|  |   1966 | 	@post	Kernel is unlocked.
 | 
|  |   1967 |  */
 | 
EXPORT_C void NKern::RotateReadyList(TInt aPriority, TInt /*aCpu*/)
	{
	// Uniprocessor build: aCpu is irrelevant, delegate to the UP overload
	RotateReadyList(aPriority);
	}
 | 
|  |   1972 | 
 | 
|  |   1973 | 
 | 
|  |   1974 | /** Returns the NThread control block for the currently scheduled thread.
 | 
|  |   1975 | 
 | 
|  |   1976 |     Note that this is the calling thread if called from a thread context, or the
 | 
|  |   1977 | 	interrupted thread if called from an interrupt context.
 | 
|  |   1978 | 	
 | 
|  |   1979 | 	@return A pointer to the NThread for the currently scheduled thread.
 | 
|  |   1980 | 	
 | 
|  |   1981 | 	@pre Call in any context.
 | 
|  |   1982 | */
 | 
EXPORT_C NThread* NKern::CurrentThread()
	{
	// Scheduler's notion of the running thread; in an ISR this is the
	// interrupted thread (see function description above)
	return (NThread*)TheScheduler.iCurrentThread;
	}
 | 
|  |   1987 | 
 | 
|  |   1988 | 
 | 
|  |   1989 | /** Returns the CPU number of the calling CPU.
 | 
|  |   1990 | 
 | 
|  |   1991 | 	@return the CPU number of the calling CPU.
 | 
|  |   1992 | 	
 | 
|  |   1993 | 	@pre Call in any context.
 | 
|  |   1994 | */
 | 
EXPORT_C TInt NKern::CurrentCpu()
	{
	// Uniprocessor build: there is only CPU 0
	return 0;
	}
 | 
|  |   1999 | 
 | 
|  |   2000 | 
 | 
|  |   2001 | /** Returns the number of CPUs available to Symbian OS
 | 
|  |   2002 | 
 | 
|  |   2003 | 	@return the number of CPUs
 | 
|  |   2004 | 	
 | 
|  |   2005 | 	@pre Call in any context.
 | 
|  |   2006 | */
 | 
EXPORT_C TInt NKern::NumberOfCpus()
	{
	// Uniprocessor build: exactly one CPU
	return 1;
	}
 | 
|  |   2011 | 
 | 
|  |   2012 | 
 | 
|  |   2013 | /** Check if the kernel is locked the specified number of times.
 | 
|  |   2014 | 
 | 
|  |   2015 | 	@param aCount	The number of times the kernel should be locked
 | 
|  |   2016 | 					If zero, tests if it is locked at all
 | 
|  |   2017 | 	@return TRUE if the tested condition is true.
 | 
|  |   2018 | 
 | 
|  |   2019 | 	@internalTechnology
 | 
|  |   2020 | */
 | 
|  |   2021 | EXPORT_C TBool NKern::KernelLocked(TInt aCount)
 | 
|  |   2022 | 	{
 | 
|  |   2023 | 	if (aCount)
 | 
|  |   2024 | 		return TheScheduler.iKernCSLocked == aCount;
 | 
|  |   2025 | 	return TheScheduler.iKernCSLocked!=0;
 | 
|  |   2026 | 	}
 | 
|  |   2027 | 
 | 
|  |   2028 | 
 | 
|  |   2029 | /******************************************************************************
 | 
|  |   2030 |  * Priority lists
 | 
|  |   2031 |  ******************************************************************************/
 | 
|  |   2032 | 
 | 
|  |   2033 | #ifndef __PRI_LIST_MACHINE_CODED__
 | 
|  |   2034 | /** Returns the priority of the highest priority item present on a priority list.
 | 
|  |   2035 | 
 | 
|  |   2036 | 	@return	The highest priority present or -1 if the list is empty.
 | 
|  |   2037 |  */
 | 
EXPORT_C TInt TPriListBase::HighestPriority()
	{
//	TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
//	return __e32_find_ms1_64(present);
	// Index of the most significant set bit of the 64-bit presence bitmask;
	// -1 when no bit is set (list empty)
	return __e32_find_ms1_64(iPresent64);
	}
 | 
|  |   2044 | 
 | 
|  |   2045 | 
 | 
|  |   2046 | /** Finds the highest priority item present on a priority list.
 | 
|  |   2047 | 
 | 
|  |   2048 | 	If multiple items at the same priority are present, return the first to be
 | 
|  |   2049 | 	added in chronological order.
 | 
|  |   2050 | 
 | 
|  |   2051 | 	@return	A pointer to the item or NULL if the list is empty.
 | 
|  |   2052 |  */
 | 
|  |   2053 | EXPORT_C TPriListLink* TPriListBase::First()
 | 
|  |   2054 | 	{
 | 
|  |   2055 | 	TInt p = HighestPriority();
 | 
|  |   2056 | 	return p >=0 ? static_cast<TPriListLink*>(iQueue[p]) : NULL;
 | 
|  |   2057 | 	}
 | 
|  |   2058 | 
 | 
|  |   2059 | 
 | 
|  |   2060 | /** Adds an item to a priority list.
 | 
|  |   2061 | 
 | 
|  |   2062 | 	@param aLink A pointer to the item - must not be NULL.
 | 
|  |   2063 |  */
 | 
|  |   2064 | EXPORT_C void TPriListBase::Add(TPriListLink* aLink)
 | 
|  |   2065 | 	{
 | 
|  |   2066 | 	TInt p = aLink->iPriority;
 | 
|  |   2067 | 	SDblQueLink* head = iQueue[p];
 | 
|  |   2068 | 	if (head)
 | 
|  |   2069 | 		{
 | 
|  |   2070 | 		// already some at this priority
 | 
|  |   2071 | 		aLink->InsertBefore(head);
 | 
|  |   2072 | 		}
 | 
|  |   2073 | 	else
 | 
|  |   2074 | 		{
 | 
|  |   2075 | 		// 'create' new list
 | 
|  |   2076 | 		iQueue[p] = aLink;
 | 
|  |   2077 | 		aLink->iNext = aLink->iPrev = aLink;
 | 
|  |   2078 | 		iPresent[p>>5] |= 1u << (p & 0x1f);
 | 
|  |   2079 | 		}
 | 
|  |   2080 | 	}
 | 
|  |   2081 | 
 | 
|  |   2082 | 
 | 
|  |   2083 | /** Removes an item from a priority list.
 | 
|  |   2084 | 
 | 
|  |   2085 | 	@param aLink A pointer to the item - must not be NULL.
 | 
|  |   2086 |  */
 | 
|  |   2087 | EXPORT_C void TPriListBase::Remove(TPriListLink* aLink)
 | 
|  |   2088 | 	{
 | 
|  |   2089 | 	if (!aLink->Alone())
 | 
|  |   2090 | 		{
 | 
|  |   2091 | 		// not the last on this list
 | 
|  |   2092 | 		TInt p = aLink->iPriority;
 | 
|  |   2093 | 		if (iQueue[p] == aLink)
 | 
|  |   2094 | 			iQueue[p] = aLink->iNext;
 | 
|  |   2095 | 		aLink->Deque();
 | 
|  |   2096 | 		}
 | 
|  |   2097 | 	else
 | 
|  |   2098 | 		{
 | 
|  |   2099 | 		TInt p = aLink->iPriority;
 | 
|  |   2100 | 		iQueue[p] = 0;
 | 
|  |   2101 | 		iPresent[p>>5] &= ~(1u << (p & 0x1f));
 | 
|  |   2102 | 		KILL_LINK(aLink);
 | 
|  |   2103 | 		}
 | 
|  |   2104 | 	}
 | 
|  |   2105 | 
 | 
|  |   2106 | 
 | 
|  |   2107 | /** Changes the priority of an item on a priority list.
 | 
|  |   2108 | 
 | 
|  |   2109 | 	@param	aLink A pointer to the item to act on - must not be NULL.
 | 
|  |   2110 | 	@param	aNewPriority A new priority for the item.
 | 
|  |   2111 |  */
 | 
|  |   2112 | EXPORT_C void TPriListBase::ChangePriority(TPriListLink* aLink, TInt aNewPriority)
 | 
|  |   2113 | 	{
 | 
|  |   2114 | 	if (aLink->iPriority!=aNewPriority)
 | 
|  |   2115 | 		{
 | 
|  |   2116 | 		Remove(aLink);
 | 
|  |   2117 | 		aLink->iPriority=TUint8(aNewPriority);
 | 
|  |   2118 | 		Add(aLink);
 | 
|  |   2119 | 		}
 | 
|  |   2120 | 	}
 | 
|  |   2121 | #endif
 | 
|  |   2122 | 
 | 
|  |   2123 | /** Adds an item to a priority list at the head of the queue for its priority.
 | 
|  |   2124 | 
 | 
|  |   2125 | 	@param aLink A pointer to the item - must not be NULL.
 | 
|  |   2126 |  */
 | 
|  |   2127 | EXPORT_C void TPriListBase::AddHead(TPriListLink* aLink)
 | 
|  |   2128 | 	{
 | 
|  |   2129 | 	TInt p = aLink->iPriority;
 | 
|  |   2130 | 	SDblQueLink* head = iQueue[p];
 | 
|  |   2131 | 	iQueue[p] = aLink;
 | 
|  |   2132 | 	if (head)
 | 
|  |   2133 | 		{
 | 
|  |   2134 | 		// already some at this priority
 | 
|  |   2135 | 		aLink->InsertBefore(head);
 | 
|  |   2136 | 		}
 | 
|  |   2137 | 	else
 | 
|  |   2138 | 		{
 | 
|  |   2139 | 		// 'create' new list
 | 
|  |   2140 | 		aLink->iNext = aLink->iPrev = aLink;
 | 
|  |   2141 | 		iPresent[p>>5] |= 1u << (p & 0x1f);
 | 
|  |   2142 | 		}
 | 
|  |   2143 | 	}
 | 
|  |   2144 | 
 | 
|  |   2145 | 
 |