// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\sched.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include "nk_priv.h"
#include <nk_irq.h>

TSpinLock	NEventHandler::TiedLock(TSpinLock::EOrderEventHandlerTied);

/******************************************************************************
 * TScheduler
 ******************************************************************************/

// TScheduler resides in .bss so other fields are zero-initialised
TScheduler::TScheduler()
	:	iActiveCpus1(1),	// only boot CPU for now
		iActiveCpus2(1),	// only boot CPU for now
		iIdleSpinLock(TSpinLock::EOrderIdleDFCList),
		iCpusNotIdle(1)		// only boot CPU for now
	{
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler* s = TheSubSchedulers + i;
		iSub[i] = s;
		s->iScheduler = this;
		s->iCpuNum = TUint32(i);
		s->iCpuMask = 1u<<i;
		}
	}


/** Return a pointer to the scheduler
	Intended for use by the crash debugger, not for general device driver use.

	@return	Pointer to the scheduler object
	@internalTechnology
 */
EXPORT_C TScheduler* TScheduler::Ptr()
	{
	return &TheScheduler;
	}
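
// Illustrative sketch only (not part of the original source): how crash-debugger or
// trace code might use TScheduler::Ptr() to walk the per-CPU sub-schedulers. The
// function name DumpCurrentThreads is hypothetical; the members it reads (iNumCpus,
// iSub, iCurrentThread) are the ones used elsewhere in this file.
#if 0	// example only, not compiled
void DumpCurrentThreads()
	{
	TScheduler* s = TScheduler::Ptr();
	for (TInt i=0; i<s->iNumCpus; ++i)
		{
		TSubScheduler* ss = s->iSub[i];
		__KTRACE_OPT(KSCHED,DEBUGPRINT("CPU %d current %T", i, ss->iCurrentThread));
		}
	}
#endif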


/******************************************************************************
 * TSubScheduler
 ******************************************************************************/

// TSubScheduler resides in .bss so other fields are zero-initialised
TSubScheduler::TSubScheduler()
	:	TPriListBase(KNumPriorities),
		iExIDfcLock(TSpinLock::EOrderExIDfcQ),
		iReadyListLock(TSpinLock::EOrderReadyList),
		iKernLockCount(1),
		iEventHandlerLock(TSpinLock::EOrderEventHandlerList)
	{
	}


/******************************************************************************
 * NSchedulable
 ******************************************************************************/
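// Schedulable spin-lock helpers: AcqSLock()/RelSLock() take and release this
// object's spin lock together with its parent group's lock (if it belongs to one);
// LAcqSLock()/RelSLockU() additionally lock and unlock the kernel around the
// operation.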
void NSchedulable::AcqSLock()
	{
	iSSpinLock.LockOnly();
	if (iParent!=this && iParent)
		iParent->AcqSLock();
	}

void NSchedulable::RelSLock()
	{
	if (iParent!=this && iParent)
		iParent->RelSLock();
	iSSpinLock.UnlockOnly();
	}

void NSchedulable::LAcqSLock()
	{
	NKern::Lock();
	AcqSLock();
	}

void NSchedulable::RelSLockU()
	{
	RelSLock();
	NKern::Unlock();
	}

void NSchedulable::UnPauseT()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::UnPauseT");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nUnPauseT",this));
	__NK_ASSERT_DEBUG(iPauseCount);
	if (--iPauseCount || iReady || iSuspended || (iParent && ((NThread*)this)->iWaitState.ThreadIsBlocked()))
		return;
	ReadyT(EUnPause);
	}

void NSchedulable::DeferredReadyIDfcFn(TAny* aPtr)
	{
	NSchedulable* a = (NSchedulable*)aPtr;
	a->AcqSLock();
	TUint32 evs = __e32_atomic_and_acq32(&a->iEventState, ~EDeferredReady);
	if (evs & EDeferredReady)
		{
		if (a->iParent)
			{
			// thread
			a->UnPauseT();
			}
		else
			{
			// thread group
			NThreadGroup* g = (NThreadGroup*)a;
			__KTRACE_OPT(KNKERN,DEBUGPRINT("%G nDeferredReady",g));
			__NK_ASSERT_DEBUG(g->iPauseCount);
			if (!--g->iPauseCount && g->iNThreadList.NonEmpty())
				g->ReadyT(EUnPause);
			}
		}
	a->RelSLock();
	}

TInt NSchedulable::AddTiedEvent(NEventHandler* aEvent)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T AddEv %08x",this,aEvent));
	TInt r = KErrGeneral;
	NEventHandler::TiedLock.LockOnly();
	AcqSLock();
	if (iStopping)
		r = KErrDied;
	else if (!aEvent->iTied)
		{
		aEvent->iTied = this;
		iEvents.Add(&aEvent->iTiedLink);
		r = KErrNone;
		}
	RelSLock();
	NEventHandler::TiedLock.UnlockOnly();
	return r;
	}

void ipi_dummy(TGenericIPI*)
	{
	}

/** Detach and cancel any tied events attached to this thread/group

Call in a thread context with interrupts and preemption enabled.
Calling thread in critical section.

@internalComponent
*/
void NSchedulable::DetachTiedEvents()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T DetTiedEv",this));
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	AcqSLock();
	iStopping = TRUE;
	if (!iParent)
		{
		// can't destroy a group until all threads have detached from it
		NThreadGroup* g = (NThreadGroup*)this;
		__NK_ASSERT_ALWAYS(g->iThreadCount==0 && g->iNThreadList.IsEmpty());
		}
	RelSLock();
	NEventHandler::TiedLock.UnlockOnly();

	// send IPI to all processors to synchronise
	// after this, any tied IDFCs can only proceed to completion
	// they can't be queued again
	TGenericIPI ipi;
	ipi.QueueAllOther(&ipi_dummy);
	NKern::Unlock();
	ipi.WaitCompletion();

	FOREVER
		{
		NKern::Lock();
		NEventHandler::TiedLock.LockOnly();
		AcqSLock();
		NEventHandler* h = 0;
		TInt type = -1;
		if (!iEvents.IsEmpty())
			{
			h = _LOFF(iEvents.First()->Deque(), NEventHandler, iTiedLink);
			h->iTiedLink.iNext = 0;
			type = h->iHType;
			}
		RelSLock();
		if (type == NEventHandler::EEventHandlerNTimer)
			{
			// everything's easy for a timer since we can just cancel it here
			NTimer* tmr = (NTimer*)h;
			tmr->DoCancel(NTimer::ECancelDestroy);
			tmr->iTied = 0;
			}
		else if (type == NEventHandler::EEventHandlerIDFC)
			{
			// can just cancel the IDFC with TiedLock held
			// EndTiedEvent() may be delayed, but we wait for that further down
			// iTied will have been captured before the IDFC state is reset
			// Cancel() waits for the state to be reset
			TDfc* d = (TDfc*)h;
			d->Cancel();
			d->iHType = (TUint8)NEventHandler::EEventHandlerDummy;
			d->iTied = 0;
			}
		NEventHandler::TiedLock.UnlockOnly();
		NKern::Unlock();
		if (!h)
			break;
		switch (type)
			{
			case NEventHandler::EEventHandlerIrq:
				{
				NIrqHandler* pH = (NIrqHandler*)h;
				// pH can't have been freed since we dequeued it but left iTied set
				pH->Unbind(pH->iHandle, this);
				break;
				}
			case NEventHandler::EEventHandlerNTimer:
			case NEventHandler::EEventHandlerIDFC:
			case NEventHandler::EEventHandlerDummy:
				// nothing left to do
				break;
			default:
				__NK_ASSERT_ALWAYS(0);
				break;
			}
		}

	// Wait for any remaining tied event handlers to complete
	while (iEventState & EEventCountMask)
		{
		__chill();
		}
	}
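
// Illustrative sketch only (not part of the original source): the calling pattern
// implied by the preconditions above - thread context, interrupts and preemption
// enabled, calling thread in a critical section. The cleanup function shown here
// is hypothetical.
#if 0	// example only, not compiled
void ExampleDestroyThread(NThread* aT)
	{
	NKern::ThreadEnterCS();		// enter critical section
	aT->DetachTiedEvents();		// cancel/detach tied timers, IDFCs and interrupt handlers
	NKern::ThreadLeaveCS();		// leave critical section
	// ... the thread control block may now be freed ...
	}
#endif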

/******************************************************************************
 * NThreadGroup
 ******************************************************************************/


/******************************************************************************
 * NThreadBase
 ******************************************************************************/

/** Makes a nanothread ready.

	For use by RTOS personality layers.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.
	@pre	The thread being made ready must not be explicitly suspended

	@post	Kernel is locked.
 */
void NSchedulable::ReadyT(TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::ReadyT");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nReadyT(%x)",this,aMode));
	NThreadBase* t = (NThreadBase*)this;
#ifdef _DEBUG
	if (!iParent)
		t = (NThreadBase*)0xface0fff;
#endif
	__NK_ASSERT_DEBUG(!iReady && (!iParent || (!t->iWaitState.iWtC.iWtStFlags && !t->iPauseCount && !t->iSuspended)));
	TSubScheduler& ss0 = SubScheduler();
	NSchedulable* g = this;
	if (iParent != this && iParent)
		{
		NThreadGroup* tg = (NThreadGroup*)iParent;
		iReady = EReadyGroup;
		if (tg->iReady)
			{
			// extra thread added to group - change priority if necessary
			tg->iNThreadList.Add(this);
			TInt gp = tg->iPriority;
			TSubScheduler& ss = TheSubSchedulers[tg->iReady & EReadyCpuMask];
			ss.iReadyListLock.LockOnly();
			TInt hp = ss.HighestPriority();
			if (iPriority>gp)
				ss.ChangePriority(tg, iPriority);
			if (iPriority>hp || (iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
				{
				if (&ss == &ss0)
					RescheduleNeeded();					// reschedule on this processor
				else
					ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
				}
			if ((aMode & ENewTimeslice) && t->iTime==0 && (iNext!=this || ss.iQueue[iPriority]))
				t->iTime = t->iTimeslice;
			ss.iReadyListLock.UnlockOnly();
			return;
			}
		tg->iNThreadList.Add(this);
		tg->iPriority = iPriority;	// first in group
		g = tg;						// fall through to add group to subscheduler
		}
	TInt cpu = -1;
	if (aMode & EUnPause)
		{
		cpu = (g->iEventState & EThreadCpuMask)>>EThreadCpuShift;
		if (CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
			goto cpu_ok;
		}
	else if (g->iFreezeCpu)
		{
		cpu = g->iLastCpu;
		if (!CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
			g->iCpuChange = TRUE;
		}
	else if (!(g->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK))
		cpu = g->iCpuAffinity;
	else if ((aMode & EPreferSameCpu) && (g->iCpuAffinity & ss0.iCpuMask))
		cpu = ss0.iCpuNum;
	if (cpu < 0)
		{
		// pick a cpu
		TScheduler& s = TheScheduler;
		TUint32 m = g->iCpuAffinity & s.iActiveCpus1;
		TInt i;
		TInt lowest_p = KMaxTInt;
		for (i=0; i<s.iNumCpus; ++i)
			{
			TSubScheduler& ss = *s.iSub[i];
			if (!(m & ss.iCpuMask))
				continue;
			TInt hp = ss.HighestPriority();
			if (hp < lowest_p)
				{
				lowest_p = hp;
				cpu = i;
				continue;
				}
			if (hp > lowest_p)
				continue;
			if (cpu>=0 && g->iLastCpu!=i)
				continue;
			lowest_p = hp;
			cpu = i;
			}
		}
cpu_ok:
	__NK_ASSERT_ALWAYS(cpu>=0);
	if (g->TiedEventReadyInterlock(cpu))
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %dD",cpu));
		++g->iPauseCount;
//		((TDfc*)g->i_IDfcMem)->Add();
		return;
		}
	__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %d",cpu));
	TSubScheduler& ss = TheSubSchedulers[cpu];
	ss.iReadyListLock.LockOnly();
	TInt hp = ss.HighestPriority();
	if (g->iPriority>hp || (g->iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
		{
		if (&ss == &ss0)
			RescheduleNeeded();					// reschedule on this processor
		else
			ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
		}
	ss.Add(g);
	g->iReady = TUint8(cpu | EReadyOffset);
	if ((aMode & ENewTimeslice) && iParent && t->iTime==0 && g->iNext!=g)
		t->iTime = t->iTimeslice;
	ss.iReadyListLock.UnlockOnly();
	}
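
// Illustrative sketch only (not part of the original source): how an RTOS
// personality layer might make one of its nanothreads ready, per the
// preconditions documented above. The wrapper function name is hypothetical.
#if 0	// example only, not compiled
void ExampleMakeReady(NThread* aT)
	{
	NKern::Lock();								// ReadyT() requires the kernel lock
	aT->ReadyT(NThreadBase::ENewTimeslice);		// fresh timeslice if queued behind an equal-priority thread
	NKern::Unlock();
	}
#endif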


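/** Select the next thread to run on this CPU.

	Called with the kernel locked as part of a reschedule. Handles deferred
	timeslice expiry for threads holding a fast mutex, pending group changes
	and CPU migration of the outgoing thread, and round-robins equal-priority
	threads whose timeslices have expired.

	@return	The thread to run next, or NULL if this CPU has nothing to run.
 */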
NThread* TSubScheduler::SelectNextThread()
	{
	NThread* ot = iCurrentThread;
	NThread* t = 0;
	TBool migrate = FALSE;
	TBool gmigrate = FALSE;
	TBool fmd_done = FALSE;
	TBool fmd_res = FALSE;
	if (!ot)
		{
		iReadyListLock.LockOnly();
		iRescheduleNeededFlag = FALSE;
		goto no_ot;
		}
	ot->AcqSLock();
	if (ot->iNewParent)
		ot->iNewParent->AcqSLock();
	SaveTimesliceTimer(ot);	// remember how much of current thread's timeslice remains
	if (ot->iCsFunction==NThreadBase::ECSDivertPending && ot->iWaitState.iWtC.iWtStFlags)
		{
		// thread about to exit so cancel outstanding wait
		ot->DoReleaseT(KErrDied,0);
		}
	if (ot->iWaitState.iWtC.iWtStFlags==0)
		{
		// ASSUMPTION: If iNewParent set, ot can't hold a fast mutex (assertion in JoinGroup)
		TBool pfmd = (ot->iParent!=ot && !ot->iFastMutexDefer);
		if (ot->iTime==0 || pfmd)
			{
			// ot's timeslice has expired
			fmd_res = ot->CheckFastMutexDefer();
			fmd_done = TRUE;
			if (fmd_res)
				{
				if (ot->iTime == 0)
					ot->iTime = 0x80000000;	// mark deferred timeslice expiry
				if (pfmd)
					{
					ot->iFastMutexDefer = 1;
					++ot->iParent->iFreezeCpu;
					}
				}
			}
		}
	iReadyListLock.LockOnly();
	iRescheduleNeededFlag = FALSE;

	//	process outstanding suspend/kill/CPU change on ot

	__NK_ASSERT_DEBUG(!(ot->iWaitState.iWtC.iWtStFlags & NThreadWaitState::EWtStWaitActive));
	if (ot->iWaitState.iWtC.iWtStFlags || ot->iPauseCount || ot->iSuspended)
		{
		// ot is no longer ready to run
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T WS: %02x %02x (%08x) P:%02x S:%1x", ot,
							ot->iWaitState.iWtC.iWtStFlags, ot->iWaitState.iWtC.iWtObjType, ot->iWaitState.iWtC.iWtObj, ot->iPauseCount, ot->iSuspended));
		TInt wtst = ot->iWaitState.DoWait();
		if (wtst>=0 && wtst!=NThread::EWaitFastMutex)
			ot->iTime = ot->iTimeslice;
		ot->UnReadyT();
		if (ot->iNewParent)
			{
			ot->iParent = ot->iNewParent, ++((NThreadGroup*)ot->iParent)->iThreadCount;
			wmb();	// must make sure iParent is updated before iNewParent is cleared
			ot->iNewParent = 0;
			}
		ot->iCpuChange = FALSE;
		}
	else if (ot->iNewParent)
		{
		__NK_ASSERT_ALWAYS(ot->iParent==ot && !ot->iHeldFastMutex && !ot->iFreezeCpu);
		ot->UnReadyT();
		migrate = TRUE;
		ot->iParent = ot->iNewParent;
		ot->iCpuChange = FALSE;
		++((NThreadGroup*)ot->iParent)->iThreadCount;
		wmb();	// must make sure iParent is updated before iNewParent is cleared
		ot->iNewParent = 0;
		}
	else if (ot->iParent->iCpuChange && !ot->iParent->iFreezeCpu)
		{
		if (!CheckCpuAgainstAffinity(iCpuNum, ot->iParent->iCpuAffinity))
			{
			if (ot->iParent==ot)
				{
				if (!fmd_done)
					fmd_res = ot->CheckFastMutexDefer(), fmd_done = TRUE;
				if (!fmd_res)
					{
					__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T A:%08x",ot,ot->iParent->iCpuAffinity));
					ot->UnReadyT();
					migrate = TRUE;
					ot->iCpuChange = FALSE;
					}
				}
			else
				{
				__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T GA:%08x",ot,ot->iParent->iCpuAffinity));
				Remove(ot->iParent);
				ot->iParent->iReady = 0;
				gmigrate = TRUE;
				ot->iCpuChange = FALSE;
				ot->iParent->iCpuChange = FALSE;
				}
			}
		else
			{
			ot->iCpuChange = FALSE;
			ot->iParent->iCpuChange = FALSE;
			}
		}
no_ot:
	NSchedulable* g = (NSchedulable*)First();
	TBool rrcg = FALSE;
	if (g && g->IsGroup())
		{
		t = (NThread*)((NThreadGroup*)g)->iNThreadList.First();
		if (g->iNext!=g)
			rrcg = TRUE;
		}
	else
		t = (NThread*)g;
	TBool rrct = (t && t->iNext!=t);
	if (t && t->iTime==0 && (rrcg || rrct))
		{
		// candidate thread's timeslice has expired and there is another at the same priority
		if (t==ot)
			{
			if (ot->iParent!=ot)
				{
				((NThreadGroup*)ot->iParent)->iNThreadList.iQueue[ot->iPriority] = ot->iNext;
				iQueue[ot->iParent->iPriority] = ot->iParent->iNext;
				}
			else
				iQueue[ot->iPriority] = ot->iNext;
			ot->iTime = ot->iTimeslice;
			NSchedulable* g2 = (NSchedulable*)First();
			if (g2->IsGroup())
				t = (NThread*)((NThreadGroup*)g2)->iNThreadList.First();
			else
				t = (NThread*)g2;
			if (t->iTime==0)
				{
				// loop again since we need to lock t before round robining it
				__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T RRL",ot));
				iRescheduleNeededFlag = TRUE;
				}
			else
				{
				__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T RR",ot));
				}
/*			if (ot->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
				{
				ot->UnReadyT();
				migrate = TRUE;
				}
			else
				ot->iTime = ot->iTimeslice;
*/
			}
		else	// loop again since we need to lock t before round robining it
			{
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T LL",ot));
			iRescheduleNeededFlag = TRUE;
			}
		}
	if (t != ot)
		{
		if (ot)
			{
			ot->iCurrent = 0;
			ot->iParent->iCurrent = 0;
			ot->CompleteContextSave();
			}
		if (t)
			{
			t->iLastCpu = iCpuNum;
			t->iParent->iLastCpu = iCpuNum;
			t->iCurrent = TUint8(iCpuNum | NSchedulable::EReadyOffset);
			t->iParent->iCurrent = t->iCurrent;
			}
		iCurrentThread = t;
		}
	UpdateThreadTimes(ot,t);		// update ot's run time and set up the timeslice timer for t
	iReadyListLock.UnlockOnly();
	if (migrate)
		ot->ReadyT(NThreadBase::ENewTimeslice);	// new timeslice if it's queued behind another thread at same priority
	if (gmigrate)
		ot->iParent->ReadyT(0);	// new timeslice if it's queued behind another thread at same priority
	if (ot)
		{
		ot->RelSLock();

		// DFC to signal thread is now dead
		if (ot->iWaitState.ThreadIsDead() && ot->iWaitState.iWtC.iKillDfc)
			ot->iWaitState.iWtC.iKillDfc->DoEnque();
		}
	__KTRACE_OPT(KSCHED,DEBUGPRINT("Rschd->%T",t));
	__NK_ASSERT_ALWAYS(!t || t->iParent);	// must be a thread not a group
	return t;	// could return NULL
	}


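// Remove this thread from the ready list of its CPU, or from its group's thread
// list if it is in a group, removing or re-prioritising the group on its CPU as
// required.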
void NThreadBase::UnReadyT()
	{
	if (iParent!=this)
		{
		NThreadGroup& g = *(NThreadGroup*)iParent;
		TPriListBase& l = g.iNThreadList;
		l.Remove(this);
		if (g.iReady)
			{
			TSubScheduler& ss = TheSubSchedulers[g.iReady & EReadyCpuMask];
			if (l.IsEmpty())
				{
//				__KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT (G=%G-)",this,&g));
				ss.Remove(&g);
				g.iReady = 0;
				g.iPriority = 0;
				}
			else
				{
//				__KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT (G=%G)",this,&g));
				ss.ChangePriority(&g, l.HighestPriority());
				}
			}
		}
	else
		{
//		__KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT",this));
		TheSubSchedulers[iReady & EReadyCpuMask].Remove(this);
		}
	iReady = 0;
	}


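// Re-position a thread that is already on a ready list after its effective
// priority (the higher of iBasePri and iMutexPri) has changed, updating its
// group's priority where necessary and requesting a reschedule on the affected
// CPU if the change means a different thread should now run there.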
void NThreadBase::ChangeReadyThreadPriority()
	{
	TInt newp = iMutexPri>iBasePri ? iMutexPri : iBasePri;
	TInt oldp = iPriority;
	TSubScheduler* ss0 = &SubScheduler();
	TSubScheduler* ss = 0;
	if (iParent->iReady)
		{
		ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
		ss->iReadyListLock.LockOnly();
		}
	TBool resched = FALSE;
	NSchedulable* g = iParent;
	if (g!=this)
		{
		NThreadGroup* tg = (NThreadGroup*)g;
		tg->iNThreadList.ChangePriority(this, newp);
		if (ss)
			{
			TInt ngp = tg->iNThreadList.HighestPriority();
			if (ngp!=tg->iPriority)
				ss->ChangePriority(tg, ngp);
			}
		}
	else
		ss->ChangePriority(this, newp);
	if (iCurrent)	// can't be current if parent not ready
		{
		TInt nhp = ss->HighestPriority();
		if (newp<oldp && (newp<nhp || (newp==nhp && iTime==0)))
			resched = TRUE;
		}
	else if (ss)
		{
		NThreadBase* ct = ss->iCurrentThread;
		TInt cp = ct ? ct->iPriority : -1;
		if (newp>cp || (newp==cp && ct->iTime==0))
			resched = TRUE;
		}
	if (resched)
		{
		if (ss == ss0)
			RescheduleNeeded();
		else
			ss0->iReschedIPIs |= ss->iCpuMask;	// will kick the other CPU when this CPU reenables preemption
		}
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	}


/** Changes the priority of a nanokernel thread.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.

	The thread's unknown state handler will be invoked with function EChangePriority
	and parameter newp if the current NState is not recognised and the new priority
	is not equal to the original priority.

	@param	newp  The new nanokernel priority (0 <= newp < KNumPriorities).

	@pre	Kernel must be locked.
	@pre	Call in a thread context.

	@post	Kernel is locked.
 */
EXPORT_C void NThreadBase::SetPriority(TInt newp)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority");
	AcqSLock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetPri %d(%d)->%d(%d)",this,iPriority,iBasePri,newp,iMutexPri));
	iBasePri = TUint8(newp);
	if (iMutexPri > iBasePri)
		newp = iMutexPri;
	TInt oldp = iPriority;
	if (newp == oldp)
		{
		RelSLock();
		return;
		}
	NFastMutex* wfm = 0;
	if (iLinkedObj && iLinkedObjType==EWaitFastMutex)
		wfm = (NFastMutex*)iLinkedObj;
	if (wfm)
		{
		// if thread is attached to/waiting on a fast mutex, need to acquire mutex lock
		++iPauseCount;
		RelSLock();
		wfm->iMutexLock.LockOnly();
		AcqSLock();
		UnPauseT();
		wfm->iWaitQ.ChangePriority(&iWaitLink, newp);	// change position of this thread on mutex wait queue
		}
	if (iReady)
		{
		ChangeReadyThreadPriority();
		RelSLock();
		if (wfm && newp<=wfm->iWaitQ.HighestPriority())
			{
			// this thread was contending for the mutex but there may be other waiting threads
			// with higher or equal priority, so wake up the first thread on the list.
			NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);
			pT->AcqSLock();

			// if thread is still blocked on this fast mutex, release it but leave it on the wait queue
			// NOTE: it can't be suspended
			pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
			pT->RelSLock();
			}
		}
	else
		{
		iPriority = (TUint8)newp;
		if (wfm && newp>oldp)
			{
			NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);	// highest priority waiting thread
			if (pT==this)
				{
				// this is now highest priority waiting thread so wake it up
				iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
				}
			}
		RelSLock();
		}
	if (wfm)
		{
		NThreadBase* t = (NThreadBase*)(TLinAddr(wfm->iHoldingThread)&~3);
		if (t)
			t->SetMutexPriority(wfm);
		wfm->iMutexLock.UnlockOnly();
		}
	}
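
// Illustrative sketch only (not part of the original source): calling
// NThreadBase::SetPriority() from an RTOS personality layer, per the
// preconditions documented above. The wrapper function name is hypothetical.
#if 0	// example only, not compiled
void ExampleSetPriority(NThreadBase* aT, TInt aNewPri)
	{
	NKern::Lock();				// SetPriority() must be called with the kernel locked
	aT->SetPriority(aNewPri);	// 0 <= aNewPri < KNumPriorities
	NKern::Unlock();
	}
#endif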


/** Set the inherited priority of a nanokernel thread.

	@pre	Kernel must be locked.
	@pre	Call in a thread context.
	@pre	The thread holds a fast mutex

	@post	Kernel is locked.
 */
void NThreadBase::SetMutexPriority(NFastMutex* aM)
	{
	TInt newp = aM->iWaitQ.HighestPriority();
	if (newp<0)
		newp = 0;
	AcqSLock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetMPri %d->%d Base %d (mutex %08x)",this,iMutexPri,newp,iBasePri,aM));
	iMutexPri = TUint8(newp);
	if (iMutexPri < iBasePri)
		newp = iBasePri;
	TInt oldp = iPriority;
	if (newp == oldp)
		{
		RelSLock();
		return;
		}
	if (iReady)
		ChangeReadyThreadPriority();
	else
		iPriority = (TUint8)newp;
	RelSLock();
	}


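// Revert this thread to its base priority when it loses a priority inherited
// through a fast mutex, adjusting its group's priority and requesting a
// reschedule if it may no longer be the highest priority ready thread on its CPU.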
void NThreadBase::LoseInheritedPriorityT()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nLoseInhPri %d->%d",this,iPriority,iBasePri));
	TSubScheduler* ss = &SubScheduler();
	TInt newp = iBasePri;
	NSchedulable* g = iParent;
	ss->iReadyListLock.LockOnly();
	if (g!=this)
		{
		NThreadGroup* tg = (NThreadGroup*)g;
		tg->iNThreadList.ChangePriority(this, newp);
		TInt hp = tg->iNThreadList.HighestPriority();
		if (hp == tg->iPriority)
			{
			if (newp <= hp)
				RescheduleNeeded();
			goto out;
			}
		newp = hp;
		g = tg;
		}
	if (newp <= ss->HighestPriority())
		RescheduleNeeded();
	ss->ChangePriority(g, newp);
out:
	ss->iReadyListLock.UnlockOnly();
	}