/*
* Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
* os\kernelhwsrv\kernel\eka\drivers\power\smppower\idlehelper.cpp
* Implementation of the helper classes required to implement CPU idle
* functionality in an SMP BSP.
*
*/


/**
@file
@prototype
*/

#include <kernel/arm/arm.h>
#include <smppower/idlehelper.h>

#ifdef __SMP__
//-/-/-/-/-/-/-/-/-/ class TIdleSupport /-/-/-/-/-/-/-/-/-/

TUint TIdleSupport::iGlobalIntDistAddress=0;
TUint TIdleSupport::iBaseIntIfAddress=0;
volatile TUint32* TIdleSupport::iTimerCount=0;
volatile TUint32 TIdleSupport::iIdlingCpus=0;
volatile TUint32 TIdleSupport::iAllEngagedCpusMask=0;
volatile TUint32 TIdleSupport::iRousingCpus=0;
volatile TUint32 TIdleSupport::iExitRequired=EFalse;

/**
Sets up interrupt access for the static library by recording the
interrupt distributor and CPU interrupt interface addresses.
aGlobalIntDistAddress = interrupt distributor base address
aBaseIntIfAddress = CPU interrupt interface base address
aTimerCount = optional pointer to a HW timer counter register from the BSP (only used for btrace)
@pre
*/

void TIdleSupport::SetupIdleSupport(TUint32 aGlobalIntDistAddress, TUint32 aBaseIntIfAddress, TUint32* aTimerCount)
    {
    iGlobalIntDistAddress=aGlobalIntDistAddress;
    iBaseIntIfAddress=aBaseIntIfAddress;
    iTimerCount=aTimerCount; /*NULL by default*/
    iAllEngagedCpusMask=AllCpusMask();
    }
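
/*
Usage sketch (illustrative only, not part of the original source): a BSP's
variant initialisation might wire up the library as below. KGicDistBase and
KGicCpuIfBase are hypothetical platform-specific addresses; passing NULL for
the timer count makes btrace fall back to NKern::FastCounter().

    TIdleSupport::SetupIdleSupport(KGicDistBase, KGicCpuIfBase, NULL);
    TIdleSupport::SetIdleIPIToHighestPriority();
*/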
/**
Returns the current HW timer counter register value if one was supplied.
Only used for btrace. If no counter has been set, NKern::FastCounter() is
returned instead.
*/

TUint32 TIdleSupport::GetTimerCount()
    {
    if(iTimerCount)
        return *iTimerCount;
    else
        return NKern::FastCounter();
    }

/**
Returns TRUE if any interrupt is pending, FALSE otherwise
*/

TBool TIdleSupport::IsIntPending()
    {
    return (IntPending()!=KNoInterruptsPending);
    }

/**
Sets the priority of the Idle IPI to be the highest
@pre
*/

void TIdleSupport::SetIdleIPIToHighestPriority()
    {
    // Set Idle IPI to highest priority
    NKern::ThreadEnterCS();
    TInt frz = NKern::FreezeCpu();
    __PM_IDLE_ASSERT_ALWAYS(!frz);
    TInt orig_cpu = NKern::CurrentCpu();
    TInt ncpu = NKern::NumberOfCpus();
    TInt cpu = orig_cpu;
    TUint32 orig_affinity = 0;
    do
        {
        TUint32 affinity = NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), (TUint32)cpu);
        if (cpu == orig_cpu)
            {
            orig_affinity = affinity;
            NKern::EndFreezeCpu(frz);
            }
        TInt cpu_now = NKern::CurrentCpu();
        __PM_IDLE_ASSERT_ALWAYS(cpu_now == cpu);

        // here we can set the priority of the IPI vector for each CPU in turn
        GicDistributor* theGIC = (GicDistributor*) TIdleSupport::iGlobalIntDistAddress;
        TUint8* priorities = (TUint8*) &(theGIC->iPriority);
        priorities[IDLE_WAKEUP_IPI_VECTOR]=0x0;
        __e32_io_completion_barrier();
        if (++cpu == ncpu)
            cpu = 0;
        } while (cpu != orig_cpu);
    NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), orig_affinity);
    NKern::ThreadLeaveCS();
    }


/**
Atomically clears the current CPU's bit in the idle mask to indicate that the
current core has woken up from an interrupt or IPI.
Returns TRUE only if all other cores were idle and we were woken by the IPI
sent by the last core going idle (otherwise FALSE).
aCpuMask - bit mask with only the current CPU's bit set
Normal usage: use in the idle handler after waking from the all-cores-down IPI

@pre
*/
TBool TIdleSupport::ClearLocalAndCheckGlobalIdle(TUint32 aCpuMask)
    {
    return (__e32_atomic_and_ord32(&iIdlingCpus,~aCpuMask) & KGlobalIdleFlag);
    }


/**
Atomically sets the current CPU's bit in the rousing mask to indicate that the
current CPU has woken.
Returns TRUE only if this is the first CPU awake (otherwise FALSE).
aCMask - bit mask with only the current CPU's bit set
Normal usage: use in the idle handler just after the core is woken

@pre */


TBool TIdleSupport::FirstCoreAwake(TUint32 aCMask)
    {
    //TInt c = NKern::CurrentCpu();
    //TUint32 cMask = (1<<c);//only current cpu mask is set
    return (!__e32_atomic_ior_acq32(&iRousingCpus,aCMask));
    }
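
/*
Wake-up sketch (illustrative only, not part of the original source): a BSP
idle handler might combine the two calls above on its wake-up path, where
aCpuMask is assumed to have been computed on entry as 1 << NKern::CurrentCpu().

    if (TIdleSupport::FirstCoreAwake(aCpuMask))
        {
        // first core to rouse: restore any state powered down on last-man entry
        }
    if (TIdleSupport::ClearLocalAndCheckGlobalIdle(aCpuMask))
        {
        // woken by the all-cores-down IPI rather than a device interrupt
        }
*/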

/**
Sets the exit required flag in TIdleSupport. Exit required normally needs to
be set when an interrupt is pending on a core.
aBreakSyncPoint - TBreakableSyncPoint* that all cores were waiting on before
the interrupt occurred. Normal usage: after an interrupt pending check

@pre */

void TIdleSupport::SetExitRequired(TBreakableSyncPoint* aBreakSyncPoint)
    {
    iExitRequired=ETrue;
    if(aBreakSyncPoint)
        aBreakSyncPoint->Break();
    }
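
/*
Usage sketch (illustrative only, not part of the original source): after a
pending-interrupt check while cores converge on a breakable sync point,
where syncPoint is a hypothetical TBreakableSyncPoint shared by all cores.

    if (TIdleSupport::IsIntPending())
        TIdleSupport::SetExitRequired(&syncPoint); // breaks the point so no core blocks
*/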

/**
Returns the exit required flag previously set by SetExitRequired().

@pre */

TBool TIdleSupport::GetExitRequired()
    {
    return iExitRequired;
    }

/**
Resets all the control flags/syncpoints. This is normally done by the
last core when all cores are confirmed to be idle.

@pre */

void TIdleSupport::ResetLogic()
    {
    iIdlingCpus = 0;        // clear idle CPUs
    iRousingCpus = 0;       // clear rousing CPUs
    iExitRequired = EFalse;
    }


/**
Marks a core as retired

@pre called by the idle handler
*/
void TIdleSupport::MarkCoreRetired(TUint32 aCpuMask)
    {
    __e32_atomic_and_rlx32(&iAllEngagedCpusMask,~aCpuMask);
    PMBTRACE4(KRetireCore,KRetireMarkCoreRetired,aCpuMask);
    }

/**
Marks a core as engaged
@pre called outside the idle handler
*/
void TIdleSupport::MarkCoreEngaged(TUint32 aCpuMask)
    {
    __e32_atomic_ior_rlx32(&iAllEngagedCpusMask,aCpuMask);
    PMBTRACE4(KEngageCore,KEngageMarkCoreEngaged,aCpuMask);
    }
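
/*
Pairing sketch (illustrative only, not part of the original source): a core
retires itself from within the idle handler before an extended power-down and
is re-engaged later from outside the idle handler, e.g. by a hypothetical
power controller holding the CPU number in aCpu.

    TIdleSupport::MarkCoreRetired(1 << NKern::CurrentCpu()); // in idle handler
    // ... core powered down, then brought back ...
    TIdleSupport::MarkCoreEngaged(1 << aCpu);                // outside idle handler
*/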

/**
Returns the current CPU idling bit mask
@pre */

TUint32 TIdleSupport::GetCpusIdleMask()
    {
    return iIdlingCpus;
    }

/**
Returns the address of the engaged CPUs mask, needed for sync point construction

*/

volatile TUint32* TIdleSupport::EngagedCpusMaskAddr()
    {
    return &iAllEngagedCpusMask;
    }

/**
Returns a mask with one bit set for each CPU present on the system

*/

TUint32 TIdleSupport::AllCpusMask()
    {
    return ((0x1<<NKern::NumberOfCpus())-1);
    }
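
/*
Worked example (illustrative note, not part of the original source): on a
4-core system NKern::NumberOfCpus() returns 4, so the mask is
(0x1 << 4) - 1 = 0xF, i.e. bits 0-3 set, one per CPU.
*/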

/**
Clears the Idle IPI and asserts that the acknowledged interrupt was indeed the
idle wakeup IPI vector
@pre */
#ifdef _DEBUG
void TIdleSupport::ClearIdleIPI()
    {
    __PM_IDLE_ASSERT_ALWAYS((DoClearIdleIPI()&0x1ff)==IDLE_WAKEUP_IPI_VECTOR);
    }
#endif


//-/-/-/-/-/-/-/-/-/ class TSyncPointBase /-/-/-/-/-/-/-/-/-/
TSyncPointBase::TSyncPointBase()
    : iStageAndCPUWaitingMask(0),
      iAllEnagedCpusMask(TIdleSupport::EngagedCpusMaskAddr())
    {
    }


#ifdef _DEBUG
void TSyncPointBase::SignalAndWait(TUint32 aStage)
    {
    PMBTRACE8(KSyncPoint,KSignalAndWaitEntry,aStage,*iAllEnagedCpusMask);
#else
void TSyncPointBase::SignalAndWait()
    {
#endif
    TInt c = NKern::CurrentCpu();
    DoSW(1<<c);
#ifdef _DEBUG
    PMBTRACE0(KSyncPoint,KSignalAndWaiteXit);
#endif
    }


/**
Resets a sync point.
No barriers are used in this function, so add them if required. For breakable
sync points this must be called before the sync point can be used; for normal
sync points it must be called whenever a CPU gets engaged.
@pre Should be called from one CPU.
*/
void TSyncPointBase::Reset()
    {
    // Could assert it is already broken. Not using atomics because this must
    // be called from only one CPU and be externally synchronised.
    iStageAndCPUWaitingMask = 0;
    }


//-/-/-/-/-/-/-/-/-/ class TBreakableSyncPoint /-/-/-/-/-/-/-/-/-/

/**
Breaks the sync point until it is reset again. Any attempt to wait on the
point will return immediately until the point is reset
*/
void TBreakableSyncPoint::Break()
    {
    __e32_atomic_ior_ord32(&iStageAndCPUWaitingMask,0x80000000);
    }
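
/*
Lifecycle sketch (illustrative only, not part of the original source): one CPU
resets the point, idling CPUs signal-and-wait on it, and any CPU that detects
a pending interrupt breaks it so every waiter falls through. syncPoint is a
hypothetical shared TBreakableSyncPoint; SignalAndWait takes a stage argument
in debug builds only.

    syncPoint.Reset();                             // one CPU, before use
    // ...
    if (TIdleSupport::IsIntPending())
        TIdleSupport::SetExitRequired(&syncPoint); // breaks the point
    else
        syncPoint.SignalAndWait(1);                // debug signature shown
*/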


#endif //__SMP__