kernel/eka/nkernsmp/arm/ncutils.cpp
branch:    RCL_3
changeset: 44:3e88ff8f41d5
parent:    43:c1f20ce4abcf
#include <arm_scu.h>
#include <arm_tmr.h>
#include <nk_irq.h>

extern "C" {
extern TUint KernCoreStats_EnterIdle(TUint aCore);
extern void KernCoreStats_LeaveIdle(TInt aCookie,TUint aCore);

extern void DetachComplete();
extern void send_irq_ipi(TSubScheduler*, TInt);
}

TInt ClockFrequenciesChanged();

/******************************************************************************
 * Spin lock
 ******************************************************************************/
/** Create a spin lock
// ...
#endif

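// First-phase nanokernel initialisation. The variant hands over a pointer to
// its SVariantInterfaceBlock, which supplies the SCU/GIC/timer base addresses,
// clock frequency data and the per-CPU uncached memory blocks used for the
// core power on/off handshake.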
void NKern::Init0(TAny* a)
	{
	__KTRACE_OPT(KBOOT,DEBUGPRINT("VIB=%08x", a));
	SVariantInterfaceBlock* v = (SVariantInterfaceBlock*)a;
	TheScheduler.iVIB = v;
	__NK_ASSERT_ALWAYS(v && v->iVer==0 && v->iSize==sizeof(SVariantInterfaceBlock));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iVer=%d iSize=%d", v->iVer, v->iSize));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxCpuClock=%08x %08x", I64HIGH(v->iMaxCpuClock), I64LOW(v->iMaxCpuClock)));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", v->iMaxTimerClock));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iScuAddr=%08x", v->iScuAddr));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicDistAddr=%08x", v->iGicDistAddr));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicCpuIfcAddr=%08x", v->iGicCpuIfcAddr));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iLocalTimerAddr=%08x", v->iLocalTimerAddr));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGlobalTimerAddr=%08x", v->iGlobalTimerAddr));

	TScheduler& s = TheScheduler;
	s.iSX.iScuAddr = (ArmScu*)v->iScuAddr;
	s.iSX.iGicDistAddr = (GicDistributor*)v->iGicDistAddr;
	s.iSX.iGicCpuIfcAddr = (GicCpuIfc*)v->iGicCpuIfcAddr;
	s.iSX.iLocalTimerAddr = (ArmLocalTimer*)v->iLocalTimerAddr;
	s.iSX.iTimerMax = (v->iMaxTimerClock / 1);		// use prescaler value of 1
#ifdef	__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK
	s.iSX.iGlobalTimerAddr = (ArmGlobalTimer*)v->iGlobalTimerAddr;
	s.iSX.iGTimerFreqRI.Set(v->iGTimerFreqR);
	v->iGTimerFreqR = 0;
#endif

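	// Capture each CPU's initial clock and timer frequency ratios, then zero
	// the VIB slots (a non-null pointer there later signals a pending
	// frequency change, see SFrequencies::Populate()), and wire up the
	// per-CPU uncached block used by core control.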
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		ss.iSSX.iCpuFreqRI.Set(v->iCpuFreqR[i]);
		ss.iSSX.iTimerFreqRI.Set(v->iTimerFreqR[i]);

		v->iCpuFreqR[i] = 0;
		v->iTimerFreqR[i] = 0;
		UPerCpuUncached* u = v->iUncached[i];
		ss.iUncached = u;
		u->iU.iDetachCount = 0;
		u->iU.iAttachCount = 0;
		u->iU.iPowerOffReq = FALSE;
		u->iU.iDetachCompleteFn = &DetachComplete;
		}
	v->iFrqChgFn = &ClockFrequenciesChanged;
	__e32_io_completion_barrier();
	InterruptInit0();
	}

/** Register the global IRQ handler
	Called by the base port at boot time to bind the top level IRQ dispatcher
// ...
	@param	aHandler The address of the top level FIQ dispatcher routine
 */
EXPORT_C void Arm::SetFiqHandler(TLinAddr aHandler)
	{
	ArmInterruptInfo.iFiqHandler=aHandler;
	}

/** Register the global Idle handler
	Called by the base port at boot time to register a handler containing a pointer to
	a function that is called by the Kernel when each core reaches idle.
	Should not be called at any other time.

	@param	aHandler Pointer to idle handler function
	@param	aPtr Idle handler function argument
 */
EXPORT_C void Arm::SetIdleHandler(TCpuIdleHandlerFn aHandler, TAny* aPtr)
	{
	ArmInterruptInfo.iCpuIdleHandler.iHandler = aHandler;
	ArmInterruptInfo.iCpuIdleHandler.iPtr = aPtr;
	ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired = EFalse;
	}

extern void initialiseState(TInt aCpu, TSubScheduler* aSS);

void Arm::Init1Interrupts()
// ...
TUint32 NKern::IdleGenerationCount()
	{
	return TheScheduler.iIdleGenerationCount;
	}

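// Idle handler, called on each CPU when it has nothing to run. As well as
// tracking the set of idle CPUs in iCpusNotIdle, this is where core control
// decides whether the CPU should simply wait for interrupt or retire itself
// (power off), signalled via the flags passed to NKIdle().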
void NKern::DoIdle()
	{
	TScheduler& s = TheScheduler;
	TSubScheduler& ss = SubScheduler();	// OK since idle thread locked to CPU
	SPerCpuUncached* u0 = &((UPerCpuUncached*)ss.iUncached)->iU;
	TUint32 m = ss.iCpuMask;
	TUint32 retire = 0;
	TBool global_defer = FALSE;
	TBool event_kick = FALSE;
	s.iIdleSpinLock.LockIrq();
	TUint32 orig_cpus_not_idle = __e32_atomic_and_acq32(&s.iCpusNotIdle, ~m);
	if (orig_cpus_not_idle == m)
		{
		// all CPUs idle
		// ...
			NKern::Lock();
			NKern::Unlock();	// process idle DFCs here
			return;
			}
		}
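	// Core control: this CPU may only power off if it is no longer accepting
	// threads or reactivation requests, has no IPI or event handler work
	// pending, and no other CPU is still part-way through powering off.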
	TBool shutdown_check = !((s.iThreadAcceptCpus|s.iCCReactivateCpus) & m);
	if (shutdown_check)
		{
		// check whether this CPU is ready to be powered off
		s.iGenIPILock.LockOnly();
		ss.iEventHandlerLock.LockOnly();
		if ( !((s.iThreadAcceptCpus|s.iCCReactivateCpus) & m) && !ss.iDeferShutdown && !ss.iNextIPI && !ss.iEventHandlersPending)
			{
			for(;;)
				{
				if (s.iCCDeferCount)
					{
					global_defer = TRUE;
					break;
					}
				if (s.iPoweringOff)
					{
					// another CPU might be in the process of powering off
					SPerCpuUncached* u = &((UPerCpuUncached*)s.iPoweringOff->iUncached)->iU;
					if (u->iDetachCount == s.iDetachCount)
						{
						// still powering off so we must wait
						global_defer = TRUE;
						break;
						}
					}
				TUint32 more = s.CpuShuttingDown(ss);
				retire = SCpuIdleHandler::ERetire;
				if (more)
					retire |= SCpuIdleHandler::EMore;
				s.iPoweringOff = &ss;
				s.iDetachCount = u0->iDetachCount;
				break;
				}
			}
		ss.iEventHandlerLock.UnlockOnly();
		s.iGenIPILock.UnlockOnly();
		}
	if (!retire && ss.iCurrentThread->iSavedSP)
		{
		// rescheduled between entry to NKern::Idle() and here
		// go round again to see if any more threads to pull from other CPUs
		__e32_atomic_ior_ord32(&s.iCpusNotIdle, m);	// we aren't idle after all
		s.iIdleSpinLock.UnlockIrq();
		return;
		}
	if (global_defer)
		{
		// Don't WFI if we're only waiting for iCCDeferCount to reach zero or for
		// another CPU to finish powering down since we might not get another IPI.
		__e32_atomic_ior_ord32(&s.iCpusNotIdle, m);	// we aren't idle after all
		s.iIdleSpinLock.UnlockIrq();
		__snooze();
		return;
		}

	// postamble happens here - interrupts cannot be reenabled
	TUint32 arg = orig_cpus_not_idle & ~m;
	if (arg == 0)
		s.AllCpusIdle();
	s.iIdleSpinLock.UnlockOnly();

	TUint cookie = KernCoreStats_EnterIdle((TUint8)ss.iCpuNum);

	arg |= retire;
	NKIdle(arg);

	// interrupts have not been reenabled
	s.iIdleSpinLock.LockOnly();

	if (retire)
		{
		// we just came back from power down
		SPerCpuUncached* u = &((UPerCpuUncached*)ss.iUncached)->iU;
		u->iPowerOnReq = 0;
		__e32_io_completion_barrier();
		s.iGenIPILock.LockOnly();
		ss.iEventHandlerLock.LockOnly();
		s.iIpiAcceptCpus |= m;
		s.iCCReactivateCpus |= m;
		s.iCpusGoingDown &= ~m;
		if (s.iPoweringOff == &ss)
			s.iPoweringOff = 0;
		if (ss.iEventHandlersPending)
			event_kick = TRUE;
		ss.iEventHandlerLock.UnlockOnly();
		s.iGenIPILock.UnlockOnly();
		}

	TUint32 ci = __e32_atomic_ior_ord32(&s.iCpusNotIdle, m);
	if (ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired)
		{
		ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired = FALSE;
		NKIdle(ci|m|SCpuIdleHandler::EPostamble);
		}
	if (ci == 0)
		s.FirstBackFromIdle();

	KernCoreStats_LeaveIdle(cookie, (TUint8)ss.iCpuNum);

	if (retire)
		{
		s.iCCReactivateDfc.RawAdd();	// kick load balancer to give us some work
		if (event_kick)
			send_irq_ipi(&ss, EQueueEvent_Kick);	// so that we will process pending events
		}
	s.iIdleSpinLock.UnlockIrq();	// reenables interrupts
	}

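// A core is "detached" from the point the variant's power-down code runs
// DetachComplete() until it has powered back up and reattached; the two
// counters in the per-CPU uncached block appear to differ for exactly that
// interval (CCInitiatePowerUp below spins until they match again).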
TBool TSubScheduler::Detached()
	{
	SPerCpuUncached* u = &((UPerCpuUncached*)iUncached)->iU;
	return u->iDetachCount != u->iAttachCount;
	}

TBool TScheduler::CoreControlSupported()
	{
	return TheScheduler.iVIB->iCpuPowerUpFn != 0;
	}

void TScheduler::CCInitiatePowerUp(TUint32 aCores)
	{
	TCpuPowerUpFn pUp = TheScheduler.iVIB->iCpuPowerUpFn;
	if (pUp && aCores)
		{
		TInt i;
		for (i=0; i<KMaxCpus; ++i)
			{
			if (aCores & (1u<<i))
				{
				TSubScheduler& ss = TheSubSchedulers[i];
				SPerCpuUncached& u = ((UPerCpuUncached*)ss.iUncached)->iU;
				u.iPowerOnReq = TRUE;
				__e32_io_completion_barrier();
				pUp(i, &u);

				// wait for core to reattach
				while (u.iDetachCount != u.iAttachCount)
					{
					__snooze();
					}
				}
			}
		}
	}

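// DFC which runs the variant's power-down hook for every core that has posted
// a power-off request in its uncached block; the barriers ensure the request
// flag updates and the hook's work are observed in order.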
       
void TScheduler::CCIndirectPowerDown(TAny*)
	{
	TCpuPowerDownFn pDown = TheScheduler.iVIB->iCpuPowerDownFn;
	if (pDown)
		{
		TInt i;
		for (i=0; i<KMaxCpus; ++i)
			{
			TSubScheduler& ss = TheSubSchedulers[i];
			SPerCpuUncached& u = ((UPerCpuUncached*)ss.iUncached)->iU;
			if (u.iPowerOffReq)
				{
				pDown(i, &u);
				__e32_io_completion_barrier();
				u.iPowerOffReq = FALSE;
				__e32_io_completion_barrier();
				}
			}
		}
	}
       
// Called on any CPU which receives an indirect power down IPI
extern "C" void handle_indirect_powerdown_ipi()
	{
	TScheduler& s = TheScheduler;
	TSubScheduler& ss = SubScheduler();
	if (s.iIpiAcceptCpus & ss.iCpuMask)
		s.iCCPowerDownDfc.Add();
	}

EXPORT_C TUint32 NKern::CpuTimeMeasFreq()
	{
	return NKern::TimestampFrequency();
	}

// ...
 	@pre aMicroseconds should be nonnegative
	@pre any context
 */
EXPORT_C TInt NKern::TimesliceTicks(TUint32 aMicroseconds)
	{
	TUint32 mf32 = TheScheduler.iSX.iTimerMax;
	TUint64 mf(mf32);
	TUint64 ticks = mf*TUint64(aMicroseconds) + UI64LIT(999999);
	ticks /= UI64LIT(1000000);
	if (ticks > TUint64(TInt(KMaxTInt)))
		return KMaxTInt;
	else
		return (TInt)ticks;
	}
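// Worked example (assuming a 200 MHz timer clock, i.e. iTimerMax == 200000000):
// TimesliceTicks(1000) = (200000000*1000 + 999999)/1000000 = 200000 ticks.
// Adding 999999 before the divide rounds the quotient up, so any nonzero
// interval yields at least one tick.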

#if defined(__NKERN_TIMESTAMP_USE_LOCAL_TIMER__)
	// Assembler
#elif defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
	// Assembler
#elif defined(__NKERN_TIMESTAMP_USE_INLINE_BSP_CODE__)
#define __DEFINE_NKERN_TIMESTAMP_CPP__
#include <variant_timestamp.h>
#undef __DEFINE_NKERN_TIMESTAMP_CPP__
#elif defined(__NKERN_TIMESTAMP_USE_BSP_CALLOUT__)
	// Assembler
#else
#error No definition for NKern::Timestamp()
#endif
       
/** Get the frequency of counter queried by NKern::Timestamp().

@publishedPartner
@prototype
*/
EXPORT_C TUint32 NKern::TimestampFrequency()
	{
#if defined(__NKERN_TIMESTAMP_USE_LOCAL_TIMER__)
	// Use per-CPU local timer in Cortex A9 or ARM11MP
	return TheScheduler.iSX.iTimerMax;
#elif defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
	// Use global timer in Cortex A9 r1p0
	return TheScheduler.iSX.iTimerMax;
#elif defined(__NKERN_TIMESTAMP_USE_INLINE_BSP_CODE__)
	// Use code in <variant_timestamp.h> supplied by BSP
	return KTimestampFrequency;
#elif defined(__NKERN_TIMESTAMP_USE_BSP_CALLOUT__)
	// Call function defined in variant
#else
#error No definition for NKern::TimestampFrequency()
#endif
	}
       
/******************************************************************************
 * Notify frequency changes
 ******************************************************************************/
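// The variant reports clock frequency changes by publishing new SRatio
// pointers in the VIB and calling iFrqChgFn (ClockFrequenciesChanged below).
// Each caller queues an SFrequencies record; the load balancer thread then
// collects the published ratios and applies them to the (sub)schedulers.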
       
struct SFrequencies
	{
	void Populate();
	void Apply();
	TBool AddToQueue();

	SFrequencies*	iNext;
	TUint32			iWhich;
	SRatioInv		iNewCpuRI[KMaxCpus];
	SRatioInv		iNewTimerRI[KMaxCpus];
	SRatioInv		iNewGTimerRI;
	NFastSemaphore*	iSem;

	static SFrequencies* volatile Head;
	};

SFrequencies* volatile SFrequencies::Head;

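// Lock-free push onto the pending-request stack: on failure
// __e32_atomic_cas_rel_ptr reloads h with the head value it observed, so the
// loop retries until this record becomes the head. Returns TRUE if the list
// was previously empty, i.e. the caller should kick the DFC.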
       
TBool SFrequencies::AddToQueue()
	{
	SFrequencies* h = Head;
	do	{
		iNext = h;
		} while(!__e32_atomic_cas_rel_ptr(&Head, &h, this));
	return !h;	// TRUE if list was empty
	}

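// iWhich records what changed: bit n flags a new CPU clock ratio for CPU n,
// bit n+8 a new timer ratio for CPU n, and bit 31 a new global timer ratio.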
       
void SFrequencies::Populate()
	{
	TScheduler& s = TheScheduler;
	TInt cpu;
	iWhich = 0;
	SRatio* ri = (SRatio*)__e32_atomic_swp_ord_ptr(&s.iVIB->iGTimerFreqR, 0);
	if (ri)
		{
		iNewGTimerRI.Set(ri);
		iWhich |= 0x80000000u;
		}
	for (cpu=0; cpu<s.iNumCpus; ++cpu)
		{
		TSubScheduler& ss = *s.iSub[cpu];
		ri = (SRatio*)__e32_atomic_swp_ord_ptr(&s.iVIB->iCpuFreqR[cpu], 0);
		if (ri)
			{
			iNewCpuRI[cpu].Set(ri);
			iWhich |= ss.iCpuMask;
			}
		ri = (SRatio*)__e32_atomic_swp_ord_ptr(&s.iVIB->iTimerFreqR[cpu], 0);
		if (ri)
			{
			iNewTimerRI[cpu].Set(ri);
			iWhich |= (ss.iCpuMask<<8);
			}
		}
	}
       
#if defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
extern void ArmGlobalTimerFreqChg(const SRatioInv* /*aNewGTimerFreqRI*/);
#endif
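// Apply the collected ratios while all other CPUs are captured in a stop IPI.
// CPUs that were actually running are handed pointers to the new ratios and
// update themselves on their next reschedule (which we then wait for); CPUs
// that are not running are updated directly here.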
       
void SFrequencies::Apply()
	{
	if (!iWhich)
		return;
	TScheduler& s = TheScheduler;
	TStopIPI ipi;
	TUint32 stopped = ipi.StopCPUs();
	TInt cpu;
	TUint32 wait = 0;
	for (cpu=0; cpu<s.iNumCpus; ++cpu)
		{
		TSubScheduler& ss = *s.iSub[cpu];
		TUint32 m = 1u<<cpu;
		TUint32 m2 = m | (m<<8);
		if (stopped & m)
			{
			// CPU is running so let it update
			if (iWhich & m2)
				{
				if (iWhich & m)
					ss.iSSX.iNewCpuFreqRI = &iNewCpuRI[cpu];
				if (iWhich & (m<<8))
					ss.iSSX.iNewTimerFreqRI = &iNewTimerRI[cpu];
				ss.iRescheduleNeededFlag = 1;
				wait |= m;
				}
			}
		else
			{
			// CPU is not running so update directly
			if (iWhich & m)
				{
				ss.iSSX.iCpuFreqRI = iNewCpuRI[cpu];
				}
			if (iWhich & (m<<8))
				{
				ss.iSSX.iTimerFreqRI = iNewTimerRI[cpu];
				}
			}
		}
#if defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
	if (iWhich & 0x80000000u)
		{
		ArmGlobalTimerFreqChg(&iNewGTimerRI);
		}
#endif
	ipi.ReleaseCPUs();	// this CPU handled here
	while(wait)
		{
		cpu = __e32_find_ls1_32(wait);
		TSubScheduler& ss = *s.iSub[cpu];
		if (!ss.iSSX.iNewCpuFreqRI && !ss.iSSX.iNewTimerFreqRI)
			wait &= ~ss.iCpuMask;
		__chill();
		}
	}
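// Drains the whole pending queue in one go, applies the accumulated changes,
// then signals each requester's fast semaphore; the list is reversed first so
// waiters are signalled in the order they queued.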
       
void TScheduler::DoFrequencyChanged(TAny*)
	{
	SFrequencies* list = (SFrequencies*)__e32_atomic_swp_ord_ptr(&SFrequencies::Head, 0);
	if (!list)
		return;
	list->Populate();
	list->Apply();
	SFrequencies* rev = 0;
	while (list)
		{
		SFrequencies* next = list->iNext;
		list->iNext = rev;
		rev = list;
		list = next;
		}
	while (rev)
		{
		NFastSemaphore* s = rev->iSem;
		rev = rev->iNext;
		NKern::FSSignal(s);
		}
	}
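// Installed in the VIB as iFrqChgFn by NKern::Init0(). Queues a request and
// either does the work directly (when there is no load balancer thread yet,
// or the caller is the load balancer itself) or, if this was the first
// request queued, kicks the frequency-change DFC; then waits for completion.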
       
TInt ClockFrequenciesChanged()
	{
	TScheduler& s = TheScheduler;
	NFastSemaphore sem(0);
	SFrequencies f;
	f.iSem = &sem;
	NThread* ct = NKern::CurrentThread();
	NThread* lbt = TScheduler::LBThread();
	NKern::ThreadEnterCS();
	TBool first = f.AddToQueue();
	if (!lbt || lbt == ct)
		TScheduler::DoFrequencyChanged(&s);
	else if (first)
		s.iFreqChgDfc.Enque();
	NKern::FSWait(&sem);
	NKern::ThreadLeaveCS();
	return KErrNone;
	}