kernel/eka/nkernsmp/nkern.cpp
branch     RCL_3
changeset  257:3e88ff8f41d5
parent     256:c1f20ce4abcf
comparison 256:c1f20ce4abcf -> 257:3e88ff8f41d5  (unified hunks; '-' = deleted line, '+' = inserted line)
@@ -96,11 +96,14 @@
 			// this forces priority changes to wait for the mutex lock
 			pC->iLinkedObjType = NThreadBase::EWaitFastMutex;
 			pC->iLinkedObj = this;
 			pC->iWaitState.SetUpWait(NThreadBase::EWaitFastMutex, NThreadWaitState::EWtStObstructed, this);
 			pC->iWaitLink.iPriority = pC->iPriority;
-			iWaitQ.Add(&pC->iWaitLink);
+			if (waited)
+				iWaitQ.AddHead(&pC->iWaitLink);	// we were next at this priority
+			else
+				iWaitQ.Add(&pC->iWaitLink);
 			pC->RelSLock();
 			if (pH)
 				pH->SetMutexPriority(this);
 do_pause:
 			iMutexLock.UnlockOnly();
@@ -878,10 +881,13 @@
 	else
 		{
 		iCsFunction = ECSDivertPending;
 		iSuspendCount = 0;
 		iSuspended = 0;
+
+		// If thread is killed before first resumption, set iACount=1
+		__e32_atomic_tau_ord8(&iACount, 1, 0, 1);
 		if (aS)
 			aS->iReadyListLock.UnlockOnly();
 		DoReleaseT(KErrDied,0);
 		if (!iReady && !iPauseCount)
 			ReadyT(0);
@@ -892,11 +898,11 @@
 // If aCount>=0 suspend the thread aCount times
 // If aCount<0 kill the thread
 TBool NThreadBase::SuspendOrKill(TInt aCount)
 	{
 	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSuspendOrKill %d", this, aCount));
-	if (aCount==0)
+	if (aCount==0 || i_NThread_Initial)
 		return FALSE;
 	TBool result = FALSE;
 	TBool concurrent = FALSE;
 	TSubScheduler* ss = 0;
 	AcqSLock();
@@ -1045,10 +1051,14 @@
 			--iSuspendCount;
 		if (!iSuspendCount)
 			{
 			result = TRUE;
 			iSuspended = 0;
+
+			// On first resumption set iACount=1
+			// From then on the thread must be killed before being deleted
+			__e32_atomic_tau_ord8(&iACount, 1, 0, 1);
 			if (!iPauseCount && !iReady && !iWaitState.iWtC.iWtStFlags)
 				ReadyT(0);
 			}
 		}
 
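Both hunks above hinge on the `__e32_atomic_tau_ord8(&iACount, 1, 0, 1)` call. As a reading aid (not part of the changeset, and assuming the usual e32 atomics semantics for the "tau" threshold-add family), the call behaves roughly like this:

	// sketch of __e32_atomic_tau_ord8(&iACount, 1, 0, 1), assuming the standard
	// threshold-add rule: if (*p >= t) *p += u; else *p += v; return the old value
	TUint8 old = iACount;
	iACount += (old >= 1) ? 0 : 1;	// latch iACount to 1 only if it was still 0
	// i.e. the activation count is set exactly once - on first resumption, or when a
	// never-resumed thread is killed - and left alone on every later call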
@@ -1210,10 +1220,13 @@
 	TDfc* pD = NULL;
 	NThreadExitHandler xh = iHandlers->iExitHandler;
 	if (xh)
 		pD = (*xh)((NThread*)this);		// call exit handler
 
+	// if CPU freeze still active, remove it
+	NKern::EndFreezeCpu(0);
+
 	// detach any tied events
 	DetachTiedEvents();
 
 	NKern::LeaveGroup();	// detach from group if exit handler didn't do it
 
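The new EndFreezeCpu(0) call above clears any CPU freeze the exiting thread still holds. For reference, a normal balanced freeze/unfreeze pair in kernel-side code looks roughly like this (a sketch only; the work function is hypothetical):

	TInt frz = NKern::FreezeCpu();	// nonzero return means the thread was already frozen
	DoCpuLocalWork();				// hypothetical: work that must not migrate to another CPU
	NKern::EndFreezeCpu(frz);		// balanced release; passing 0, as the exit path above does,
									// appears to drop the freeze regardless of nesting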
@@ -1281,60 +1294,149 @@
 
 	@param	The number of the CPU to which this thread should be locked, or
 			KCpuAny if it should be able to run on any CPU.
 	@return The previous affinity mask.
 */
-TUint32 NThreadBase::SetCpuAffinity(TUint32 aAffinity)
+TUint32 NSchedulable::SetCpuAffinityT(TUint32 aAffinity)
 	{
 	// check aAffinity is valid
-	AcqSLock();
-	TUint32 old_aff = iParent->iCpuAffinity;
-	TBool migrate = FALSE;
+	NThreadBase* t = 0;
+	NThreadGroup* g = 0;
+	NSchedulable* p = iParent;
+	if (!p)
+		g = (NThreadGroup*)this, p=g;
+	else
+		t = (NThreadBase*)this;
+	if (iParent && iParent!=this)
+		g = (NThreadGroup*)iParent;
+	TUint32 old_aff = p->iCpuAffinity;
 	TBool make_ready = FALSE;
 	TSubScheduler* ss0 = &SubScheduler();
 	TSubScheduler* ss = 0;
-	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
-	if (i_NThread_Initial)
+#ifdef KNKERN
+	if (iParent)
+		{
+		__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
+		}
+	else
+		{
+		__KTRACE_OPT(KNKERN,DEBUGPRINT("%G nSetCpu %08x->%08x, F:%d R:%02x",this,iCpuAffinity,aAffinity,iFreezeCpu,iReady));
+		}
+#endif
+	if (t && t->i_NThread_Initial)
 		goto done;	// can't change affinity of initial thread
-	iParent->iCpuAffinity = aAffinity;		// set new affinity, might not take effect yet
-	if (!iParent->iReady)
+	if (aAffinity == NTHREADBASE_CPU_AFFINITY_MASK)
+		{
+		p->iTransientCpu = 0;
+		}
+	else if ( (aAffinity & (KCpuAffinityPref|NTHREADBASE_CPU_AFFINITY_MASK)) == KCpuAffinityPref)
+		{
+		p->iTransientCpu = 0;
+		p->iPreferredCpu = TUint8((aAffinity & (EReadyCpuMask|EReadyCpuSticky)) | EReadyOffset);
+		}
+	else if ( (aAffinity & (KCpuAffinityTransient|KCpuAffinityPref|NTHREADBASE_CPU_AFFINITY_MASK)) == KCpuAffinityTransient)
+		{
+		p->iTransientCpu = TUint8(aAffinity & EReadyCpuMask) | EReadyOffset;
+		}
+	else
+		p->iCpuAffinity = NSchedulable::PreprocessCpuAffinity(aAffinity);		// set new affinity, might not take effect yet
+	if (!p->iReady)
 		goto done;	// thread/group not currently on a ready list so can just change affinity
-	migrate = !CheckCpuAgainstAffinity(iParent->iReady & EReadyCpuMask, aAffinity);	// TRUE if thread's current CPU is incompatible with the new affinity
-	if (!migrate)
+
+	// Check if the thread needs to migrate or can stay where it is
+	if (!p->ShouldMigrate(p->iReady & EReadyCpuMask))
 		goto done;	// don't need to move thread, so just change affinity
-	ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
+	ss = TheSubSchedulers + (p->iReady & EReadyCpuMask);
 	ss->iReadyListLock.LockOnly();
-	if (iParent->iCurrent)
+	if (p->iCurrent)
 		{
-		iParent->iCpuChange = TRUE;			// mark CPU change pending
+		p->iCpuChange = TRUE;			// mark CPU change pending
 		if (ss == ss0)
 			RescheduleNeeded();
 		else
 			// kick other CPU now so migration happens before acquisition of fast mutex
-			send_resched_ipi_and_wait(iParent->iReady & EReadyCpuMask);
+			send_resched_ipi_and_wait(p->iReady & EReadyCpuMask);
 		}
 	else
 		{
 		// Note: Need to know here if any thread in group would return TRUE from CheckFastMutexDefer()
 		// This is handled by the scheduler - when a thread belonging to a group is context switched
 		// out while holding a fast mutex its iFastMutexDefer is set to 1 and the group's iFreezeCpu
 		// is incremented.
-		if (iParent->iFreezeCpu || (iParent==this && CheckFastMutexDefer()))
-			iParent->iCpuChange = TRUE;	// CPU frozen or fast mutex held so just mark deferred CPU migration
+		if (p->iFreezeCpu || (iParent==this && t->CheckFastMutexDefer()))
+			p->iCpuChange = TRUE;	// CPU frozen or fast mutex held so just mark deferred CPU migration
 		else
 			{
-			ss->Remove(iParent);
-			iParent->iReady = 0;
+			ss->SSRemoveEntry(p);
+			p->iReady = 0;
 			make_ready = TRUE;
 			}
 		}
 	ss->iReadyListLock.UnlockOnly();
 	if (make_ready)
-		iParent->ReadyT(0);
+		p->ReadyT(0);
 done:
-	RelSLock();
 	return old_aff;
+	}
+
+/** Force the current thread onto a particular CPU
+
+	@pre	Kernel must not be locked.
+	@pre	Call in a thread context.
+	@pre	Current thread must not be in a group
+	@pre	Current thread must not hold a fast mutex
+	@pre	Current thread must have an active CPU freeze
+	@pre	Current thread must not be an initial thread
+
+	@param	The number of the CPU to which this thread should be moved
+*/
+void NKern::JumpTo(TInt aCpu)
+	{
+	// check aAffinity is valid
+	NThreadBase* t = NKern::CurrentThread();
+	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T NJumpTo %d", t, aCpu));
+	if (NKern::HeldFastMutex())
+		__crash();
+	t->LAcqSLock();
+	if (t->iParent!=t)
+		__crash();
+	if (!t->iFreezeCpu)
+		__crash();
+	if (t->i_NThread_Initial)
+		__crash();
+	if (TUint(aCpu) >= (TUint)NKern::NumberOfCpus())
+		__crash();
+	TUint8 fc = (TUint8)(aCpu | NSchedulable::EReadyOffset);
+	if (t->iCurrent != fc)
+		{
+		t->iForcedCpu = fc;
+		t->iCpuChange = TRUE;
+		RescheduleNeeded();
+		}
+	t->RelSLockU();		// reschedules and jumps to new CPU
+	}
+
+TBool NSchedulable::ShouldMigrate(TInt aCpu)
+	{
+	// Check if the thread's current CPU is compatible with the new affinity
+	TUint32 active = TheScheduler.iThreadAcceptCpus;
+
+	// If it can't stay where it is, migrate
+	if (!CheckCpuAgainstAffinity(aCpu, iCpuAffinity, active))
+		return TRUE;
+
+	TInt cpu = iTransientCpu ? iTransientCpu : iPreferredCpu;
+
+	// No preferred or transient CPU, so can stay where it is
+	if (!cpu)
+		return FALSE;
+
+	// If thread isn't on preferred CPU but could be, migrate
+	cpu &= EReadyCpuMask;
+	if (cpu!=aCpu && CheckCpuAgainstAffinity(cpu, iCpuAffinity, active))
+		return TRUE;
+	return FALSE;
 	}
 
 
 /******************************************************************************
  * Thread wait state
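A minimal sketch of how the new NKern::JumpTo() above is meant to be driven, following the preconditions in its header comment (kernel-side code assumed; the per-CPU work is hypothetical, and TCoreCycler::Next() later in this changeset uses the same pattern):

	TInt frz = NKern::FreezeCpu();	// JumpTo requires an active CPU freeze
	if (!frz)						// 0 => the freeze was newly applied here
		{
		NKern::JumpTo(1);			// migrate the current thread to CPU 1
		DoWorkOnThatCpu();			// hypothetical
		}
	NKern::EndFreezeCpu(frz);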
@@ -1410,11 +1512,11 @@
 		CancelTimerT();
 	if (oldws64 & EWtStWaitActive)
 		{
 		NThreadBase* t = Thread();
 		if (!t->iPauseCount && !t->iSuspended)
-			t->ReadyT(0);
+			t->ReadyT(oldws64 & EWtStObstructed);
 		}
 	return KErrNone;
 	}
 
 TUint32 NThreadWaitState::ReleaseT(TAny*& aWaitObj, TInt aReturnValue)
@@ -1754,10 +1856,28 @@
 	aMutex->Signal();
 	NKern::Unlock();
 	}
 
 
+/** Changes the nominal priority of a thread.
+
+	This function is intended to be used by the EPOC layer and personality layers.
+	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
+
+    @param aThread Thread to receive the new priority.
+    @param aPriority New inherited priority for aThread.
+
+	@see Kern::SetThreadPriority()
+*/
+void NKern::ThreadSetNominalPriority(NThread* aThread, TInt aPriority)
+	{
+	NKern::Lock();
+	aThread->SetNominalPriority(aPriority);
+	NKern::Unlock();
+	}
+
+
 /** Atomically signals the request semaphore of a nanothread and a fast mutex.
 
 	This function is intended to be used by the EPOC layer and personality
 	layers.  Device drivers should use Kern::RequestComplete instead.
 
@@ -1945,10 +2065,11 @@
 		{
 		NKern::Unlock();
 		return 1;
 		}
 	pC->iFreezeCpu = 1;
+	__e32_atomic_add_rlx32(&ss.iDeferShutdown, 1);
 	if (pC->iParent != pC)
 		{
 		pC->AcqSLock();
 		++pC->iParent->iFreezeCpu;
 		pC->RelSLock();
@@ -1984,10 +2105,11 @@
 				RescheduleNeeded();
 			pC->RelSLock();
 			}
 		else if (pC->iCpuChange)		// deferred CPU change?
 			RescheduleNeeded();
+		__e32_atomic_add_rlx32(&ss.iDeferShutdown, TUint32(-1));
 		}
 	NKern::Unlock();
 	}
 
 
@@ -1998,13 +2120,13 @@
 	@param	The new CPU affinity mask
 	@return The old affinity mask
  */
 EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity)
 	{
-	NKern::Lock();
-	TUint32 r = aThread->SetCpuAffinity(aAffinity);
-	NKern::Unlock();
+	aThread->LAcqSLock();
+	TUint32 r = aThread->SetCpuAffinityT(aAffinity);
+	aThread->RelSLockU();
 	return r;
 	}
 
 
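A hedged sketch of the affinity encodings that SetCpuAffinityT() now decodes (kernel-side code with a valid NThread* t assumed; the constant names come from the new code above, the CPU numbers are arbitrary):

	NKern::ThreadSetCpuAffinity(t, 1);								// lock t to CPU 1
	NKern::ThreadSetCpuAffinity(t, KCpuAffinityPref | 2);			// prefer CPU 2, allow others if needed
	NKern::ThreadSetCpuAffinity(t, KCpuAffinityTransient | 0);		// transient placement on CPU 0
	NKern::ThreadSetCpuAffinity(t, NTHREADBASE_CPU_AFFINITY_MASK);	// "KCpuAny" - run on any CPU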
 /** Modify a thread's timeslice
@@ -2325,11 +2447,19 @@
 		pC->AcqSLock();
 		ss.iReadyListLock.LockOnly();
 		pC->UnReadyT();
 		pC->iParent = pC;
 		g->iCurrent = 0;	// since current thread is no longer in g
-		ss.AddHead(pC);
+		TUint64 now = NKern::Timestamp();
+		g->iLastRunTime.i64 = now;
+		g->iTotalCpuTime.i64 += (now - g->iLastStartTime.i64);
+		if (--g->iActiveState == 0)
+			{
+			// group no longer active
+			g->iTotalActiveTime.i64 += (now - g->iLastActivationTime.i64);
+			}
+		ss.SSAddEntryHead(pC);
 		pC->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
 		pC->iCpuAffinity = g->iCpuAffinity;	// keep same CPU affinity
 		// if we're frozen, the group's freeze count was incremented
 		if (pC->iFreezeCpu)
 			--g->iFreezeCpu;
@@ -2349,11 +2479,11 @@
 				// we were the last thread in the group stopping it from moving
 				// but there may be no other threads left after UnReadyT'ing this one
 				g->iCpuChange = FALSE;
 				if (g->iReady)
 					{
-					ss.Remove(g);
+					ss.SSRemoveEntry(g);
 					g->iReady = 0;
 					make_group_ready = TRUE;
 					}
 				}
 			}
@@ -2391,11 +2521,13 @@
 	__ASSERT_WITH_MESSAGE_DEBUG(!pC->i_NThread_Initial, "Not idle thread", "NKern::JoinGroup");
 	__NK_ASSERT_ALWAYS(pC->iParent==pC && !pC->iFreezeCpu);
 	__KTRACE_OPT(KNKERN,DEBUGPRINT("NJoinGroup %T->%G",pC,aGroup));
 	pC->AcqSLock();
 	aGroup->AcqSLock();
-	TBool migrate = !CheckCpuAgainstAffinity(ss.iCpuNum, aGroup->iCpuAffinity);	// TRUE if thread's current CPU is incompatible with the group's affinity
+
+	// Check if current CPU is compatible with group's affinity
+	TBool migrate = !CheckCpuAgainstAffinity(ss.iCpuNum, aGroup->iCpuAffinity);
 	if (!aGroup->iReady || aGroup->iReady==pC->iReady)
 		{
 		// group not ready or ready on this CPU
 		if (!migrate)
 			{
@@ -2404,21 +2536,23 @@
 			pC->iParent = aGroup;
 			aGroup->iNThreadList.AddHead(pC);
 			if (!aGroup->iReady)
 				{
 				aGroup->iPriority = pC->iPriority;
-				ss.AddHead(aGroup);
+				ss.SSAddEntryHead(aGroup);
 				aGroup->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
 				}
 			else if (pC->iPriority > aGroup->iPriority)
-				{
-				ss.ChangePriority(aGroup, pC->iPriority);
-				}
+				ss.SSChgEntryP(aGroup, pC->iPriority);
 			pC->iReady = NSchedulable::EReadyGroup;
 			aGroup->iCurrent = aGroup->iReady;
 			ss.iReadyListLock.UnlockOnly();
 			++aGroup->iThreadCount;
+			TUint64 now = NKern::Timestamp();
+			aGroup->iLastStartTime.i64 = now;
+			if (++aGroup->iActiveState == 1)
+				aGroup->iLastActivationTime.i64 = now;
 			goto done;
 			}
 		}
 	// this thread needs to migrate to another CPU
 	pC->iNewParent = aGroup;
@@ -2442,22 +2576,59 @@
 	NKern::Unlock();
 	}
 
 
 /******************************************************************************
+ * Iterable Doubly Linked List
+ ******************************************************************************/
+TInt SIterDQIterator::Step(SIterDQLink*& aObj, TInt aMaxSteps)
+	{
+	if (aMaxSteps <= 0)
+		aMaxSteps = KMaxCpus + 3;
+	SIterDQLink* p = Next();
+	SIterDQLink* q = p;
+	__NK_ASSERT_DEBUG(p!=0);
+	for(; p->IsIterator() && --aMaxSteps>0; p=p->Next())
+		{}
+	if (p->IsObject())
+		{
+		// found object
+		Deque();
+		InsertAfter(p);
+		aObj = p;
+		return KErrNone;
+		}
+	if (p->IsAnchor())
+		{
+		// reached end of list
+		if (p != q)
+			{
+			Deque();
+			InsertBefore(p);	// put at the end
+			}
+		aObj = 0;
+		return KErrEof;
+		}
+	// Maximum allowed number of other iterators skipped
+	Deque();
+	InsertAfter(p);
+	aObj = 0;
+	return KErrGeneral;
+	}
+
+
+/******************************************************************************
  * Priority Lists
 ******************************************************************************/
 
 #ifndef __PRI_LIST_MACHINE_CODED__
 /** Returns the priority of the highest priority item present on a priority list.
 
 	@return	The highest priority present or -1 if the list is empty.
  */
 EXPORT_C TInt TPriListBase::HighestPriority()
 	{
-//	TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
-//	return __e32_find_ms1_64(present);
 	return __e32_find_ms1_64(iPresent64);
 	}
 
 
 /** Finds the highest priority item present on a priority list.
@@ -2562,32 +2733,26 @@
 
 /******************************************************************************
  * Generic IPIs
  ******************************************************************************/
 
-TGenIPIList::TGenIPIList()
-	:	iGenIPILock(TSpinLock::EOrderGenericIPIList)
-	{
-	}
-
-TGenIPIList GenIPIList;
-
 extern "C" {
 extern void send_generic_ipis(TUint32);
 
 void generic_ipi_isr(TSubScheduler* aS)
 	{
+	TScheduler& s = TheScheduler;
 	TGenericIPI* ipi = aS->iNextIPI;
 	if (!ipi)
 		return;
 	TUint32 m = aS->iCpuMask;
-	SDblQueLink* anchor = &GenIPIList.iA;
+	SDblQueLink* anchor = &s.iGenIPIList.iA;
 	while (ipi != anchor)
 		{
 		__e32_atomic_and_acq32(&ipi->iCpusIn, ~m);
 		(*ipi->iFunc)(ipi);
-		TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
+		TInt irq = s.iGenIPILock.LockIrqSave();
 		TGenericIPI* n = (TGenericIPI*)ipi->iNext;
 		ipi->iCpusOut &= ~m;
 		if (ipi->iCpusOut == 0)
 			{
 			ipi->Deque();
@@ -2597,11 +2762,11 @@
 		ipi = n;
 		while (ipi!=anchor && !(ipi->iCpusIn & m))
 			ipi = (TGenericIPI*)ipi->iNext;
 		if (ipi == anchor)
 			aS->iNextIPI = 0;
-		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
+		s.iGenIPILock.UnlockIrqRestore(irq);
 		}
 	}
 }
 
 void TGenericIPI::Queue(TGenericIPIFn aFunc, TUint32 aCpuMask)
@@ -2609,29 +2774,29 @@
 	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI F=%08x M=%08x", aFunc, aCpuMask));
 	iFunc = aFunc;
 	TScheduler& s = TheScheduler;
 	TInt i;
 	TUint32 ipis = 0;
-	TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
+	TInt irq = s.iGenIPILock.LockIrqSave();
 	if (aCpuMask & 0x80000000u)
 		{
 		if (aCpuMask==0xffffffffu)
-			aCpuMask = s.iActiveCpus2;
+			aCpuMask = s.iIpiAcceptCpus;
 		else if (aCpuMask==0xfffffffeu)
-			aCpuMask = s.iActiveCpus2 &~ SubScheduler().iCpuMask;
+			aCpuMask = s.iIpiAcceptCpus &~ SubScheduler().iCpuMask;
 		else
 			aCpuMask = 0;
 		}
 	iCpusIn = aCpuMask;
 	iCpusOut = aCpuMask;
 	if (!aCpuMask)
 		{
-		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
+		s.iGenIPILock.UnlockIrqRestore(irq);
 		iNext = 0;
 		return;
 		}
-	GenIPIList.Add(this);
+	s.iGenIPIList.Add(this);
 	for (i=0; i<s.iNumCpus; ++i)
 		{
 		if (!(aCpuMask & (1<<i)))
 			continue;
 		TSubScheduler& ss = *s.iSub[i];
@@ -2640,11 +2805,11 @@
 			ss.iNextIPI = this;
 			ipis |= (1<<i);
 			}
 		}
 	send_generic_ipis(ipis);
-	GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
+	s.iGenIPILock.UnlockIrqRestore(irq);
 	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI ipis=%08x", ipis));
 	}
 
 void TGenericIPI::QueueAll(TGenericIPIFn aFunc)
 	{
@@ -2679,30 +2844,170 @@
 	mb();
 	}
 
 /**	Stop all other CPUs
 
-	Call with kernel locked
-*/
-void TStopIPI::StopCPUs()
-	{
+Call with kernel unlocked, returns with kernel locked.
+Returns mask of CPUs halted plus current CPU.
+*/
+TUint32 TStopIPI::StopCPUs()
+	{
+	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TStopIPI::StopCPUs()");
+	TScheduler& s = TheScheduler;
 	iFlag = 0;
+	NKern::ThreadEnterCS();
+
+	// Stop any cores powering up or down for now
+	// A core already on the way down will stop just before the transition to SHUTDOWN_FINAL
+	// A core already on the way up will carry on powering up
+	TInt irq = s.iGenIPILock.LockIrqSave();
+	++s.iCCDeferCount;	// stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set
+						// but iIpiAcceptCpus | s.iCpusComingUp is constant
+	TUint32 act2 = s.iIpiAcceptCpus;		// CPUs still accepting IPIs
+	TUint32 cu = s.iCpusComingUp;			// CPUs powering up
+	s.iGenIPILock.UnlockIrqRestore(irq);
+	TUint32 cores = act2 | cu;
+	if (cu)
+		{
+		// wait for CPUs coming up to start accepting IPIs
+		while (cores & ~s.iIpiAcceptCpus)
+			{
+			__snooze();	// snooze until cores have come up
+			}
+		}
+	NKern::Lock();
 	QueueAllOther(&Isr);	// send IPIs to all other CPUs
 	WaitEntry();			// wait for other CPUs to reach the ISR
-	}
-
+	return cores;
+	}
+
+
+/**	Release the stopped CPUs
+
+Call with kernel locked, returns with kernel unlocked.
+*/
 void TStopIPI::ReleaseCPUs()
 	{
-	iFlag = 1;				// allow other CPUs to proceed
+	__e32_atomic_store_rel32(&iFlag, 1);	// allow other CPUs to proceed
 	WaitCompletion();		// wait for them to finish with this IPI
+	NKern::Unlock();
+	TheScheduler.CCUnDefer();
+	NKern::ThreadLeaveCS();
 	}
 
 void TStopIPI::Isr(TGenericIPI* a)
 	{
 	TStopIPI* s = (TStopIPI*)a;
-	while (!s->iFlag)
+	while (!__e32_atomic_load_acq32(&s->iFlag))
 		{
 		__chill();
 		}
-	}
-
+	__e32_io_completion_barrier();
+	}
 
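A short usage sketch implied by the updated StopCPUs()/ReleaseCPUs() contract above (a sketch only; call with the kernel unlocked, and the work done while the other CPUs are parked is hypothetical):

	TStopIPI stopper;
	TUint32 cpus = stopper.StopCPUs();		// returns with the kernel locked; other CPUs spin in Isr()
	DoWorkWhileOtherCpusAreHalted(cpus);	// hypothetical
	stopper.ReleaseCPUs();					// lets the other CPUs go and unlocks the kernel again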
+
+/******************************************************************************
+ * TCoreCycler - general method to execute something on all active cores
+ ******************************************************************************/
+TCoreCycler::TCoreCycler()
+	{
+	iCores = 0;
+	iG = 0;
+	}
+
+void TCoreCycler::Init()
+	{
+	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"TCoreCycler::Init()");
+	TScheduler& s = TheScheduler;
+	NKern::ThreadEnterCS();
+	iG = NKern::LeaveGroup();
+	NThread* t = NKern::CurrentThread();
+	if (t->iCoreCycling)
+		{
+		__crash();
+		}
+	t->iCoreCycling = TRUE;
+
+	// Stop any cores powering up or down for now
+	// A core already on the way down will stop just before the transition to SHUTDOWN_FINAL
+	// A core already on the way up will carry on powering up
+	TInt irq = s.iGenIPILock.LockIrqSave();
+	++s.iCCDeferCount;	// stops bits in iIpiAcceptCpus being cleared, but doesn't stop them being set
+						// but iIpiAcceptCpus | s.iCpusComingUp is constant
+	TUint32 act2 = s.iIpiAcceptCpus;		// CPUs still accepting IPIs
+	TUint32 cu = s.iCpusComingUp;			// CPUs powering up
+	TUint32 gd = s.iCpusGoingDown;			// CPUs no longer accepting IPIs on the way down
+	s.iGenIPILock.UnlockIrqRestore(irq);
+	if (gd)
+		{
+		// wait for CPUs going down to reach INACTIVE state
+		TUint32 remain = gd;
+		FOREVER
+			{
+			TInt i;
+			for (i=0; i<KMaxCpus; ++i)
+				{
+				if (remain & (1u<<i))
+					{
+					// platform specific function returns TRUE when core has detached from SMP cluster
+					if (s.iSub[i]->Detached())
+						remain &= ~(1u<<i);	// core is now down
+					}
+				}
+			if (!remain)
+				break;		// all done
+			else
+				{
+				__snooze();	// snooze until cores have gone down
+				}
+			}
+		}
+	iCores = act2 | cu;
+	if (cu)
+		{
+		// wait for CPUs coming up to start accepting IPIs
+		while (iCores & ~s.iIpiAcceptCpus)
+			{
+			__snooze();	// snooze until cores have come up
+			}
+		}
+	iFrz = NKern::FreezeCpu();
+	if (iFrz)
+		__crash();	// already frozen so won't be able to migrate :-(
+	iInitialCpu = NKern::CurrentCpu();
+	iCurrentCpu = iInitialCpu;
+	iRemain = iCores;
+	}
+
+TInt TCoreCycler::Next()
+	{
+	NThread* t = NKern::CurrentThread();
+	if (iCores == 0)
+		{
+		Init();
+		return KErrNone;
+		}
+	if (NKern::CurrentCpu() != iCurrentCpu)
+		__crash();
+	iRemain &= ~(1u<<iCurrentCpu);
+	TInt nextCpu = iRemain ? __e32_find_ms1_32(iRemain) : iInitialCpu;
+	if (nextCpu != iCurrentCpu)
+		{
+		NKern::JumpTo(nextCpu);
+		iCurrentCpu = nextCpu;
+		if (NKern::CurrentCpu() != iCurrentCpu)
+			__crash();
+		}
+	if (iRemain)
+		{
+		return KErrNone;
+		}
+	NKern::EndFreezeCpu(iFrz);
+	iCores = 0;
+	TScheduler& s = TheScheduler;
+	s.CCUnDefer();
+	t->iCoreCycling = FALSE;
+	if (iG)
+		NKern::JoinGroup(iG);
+	NKern::ThreadLeaveCS();
+	return KErrEof;
+	}
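A sketch of the driving loop the new TCoreCycler appears to be designed for (inferred from Init()/Next() above; the per-core work function is hypothetical):

	TCoreCycler cycler;
	while (cycler.Next() == KErrNone)
		{
		DoPerCoreWork(NKern::CurrentCpu());	// hypothetical; runs once on each active core in turn
		}
	// Next() returns KErrEof once every core has been visited and the thread is back on its initial CPU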