0
|
1 |
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
|
|
2 |
// All rights reserved.
|
|
3 |
// This component and the accompanying materials are made available
|
|
4 |
// under the terms of the License "Eclipse Public License v1.0"
|
|
5 |
// which accompanies this distribution, and is available
|
|
6 |
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
|
|
7 |
//
|
|
8 |
// Initial Contributors:
|
|
9 |
// Nokia Corporation - initial contribution.
|
|
10 |
//
|
|
11 |
// Contributors:
|
|
12 |
//
|
|
13 |
// Description:
|
|
14 |
// e32\kernel\arm\ckernel.cpp
|
|
15 |
//
|
|
16 |
//
|
|
17 |
|
|
18 |
#include <arm_mem.h>
|
|
19 |
#include <arm_vfp.h>
|
|
20 |
|
|
21 |
#define iMState iWaitLink.iSpare1
|
|
22 |
#define iExiting iWaitLink.iSpare2
|
|
23 |
|
|
24 |
GLREF_C void __ArmVectorReset();
|
|
25 |
GLREF_C void __ArmVectorUndef();
|
|
26 |
GLREF_C void __ArmVectorSwi();
|
|
27 |
GLREF_C void __ArmVectorAbortPrefetch();
|
|
28 |
GLREF_C void __ArmVectorAbortData();
|
|
29 |
GLREF_C void __ArmVectorReserved();
|
|
30 |
GLREF_C void __ArmVectorIrq();
|
|
31 |
GLREF_C void __ArmVectorFiq();
|
|
32 |
|
|
33 |
extern "C" void ExcFault(TAny* aExcInfo);
|
|
34 |
|
|
35 |
/********************************************
|
|
36 |
* Thread
|
|
37 |
********************************************/
|
|
38 |
|
|
39 |
DArmPlatThread::~DArmPlatThread()
	{
	// No ARM-specific state to release; delegate all cleanup to the
	// generic thread destruction path.
	DThread::Destruct();
	}
|
|
43 |
|
|
44 |
TInt DArmPlatThread::Context(TDes8& aDes)
	{
	// Copy this thread's user-side register context into aDes.
	// Returns KErrNone on success, KErrAccessDenied if the target is not a
	// user thread or is the calling thread itself, KErrDied if it is exiting.
	TArmRegSet& regs = *(TArmRegSet*)aDes.Ptr();
	aDes.SetLength(sizeof(regs));

	// A thread may not fetch its own context this way, and only user
	// threads have a user-side context to report.
	if (iThreadType != EThreadUser || this == TheCurrentThread)
		return KErrAccessDenied;
	if (iExiting)
		return KErrDied;

	TUint32 availmask;	// availability mask from the nanokernel; not used here
	NKern::ThreadGetUserContext(&iNThread, &regs, availmask);
	return KErrNone;
	}
|
|
60 |
|
|
61 |
DProcess* P::NewProcess()
	{
	// Platform hook: allocate the ARM-specific process object on the
	// kernel heap. Returns NULL if allocation fails.
	DArmPlatProcess* const process = new DArmPlatProcess;
	return process;
	}
|
|
65 |
|
|
66 |
#ifdef __CPU_HAS_VFP
|
|
67 |
#ifdef __VFP_V3
|
|
68 |
const TInt KVfpContextSize = (1 + (32 * 2)) * 4; // FPSCR + 32 dword registers
|
|
69 |
#else
|
|
70 |
const TInt KVfpContextSize = (3 + (16 * 2)) * 4; // FPSCR + FPINST + FPINST2 + 16 dword registers
|
|
71 |
#endif
|
|
72 |
#endif
|
|
73 |
TInt DArmPlatProcess::GetNewThread(DThread*& aThread, SThreadCreateInfo& aInfo)
//
// Create a new DArmPlatThread object
// If aThread=NULL on entry, allocate it on the kernel heap,
// otherwise do in-place construction.
// Returns KErrNone on success, KErrNoMemory if the heap allocation fails.
//
	{
	DArmPlatThread* pT=(DArmPlatThread*)aThread;
	if (!pT)
		{
		// Heap path: reserve extra space directly after the thread object
		// for the per-thread VFP register save area, if the CPU has VFP.
		TInt size = sizeof(DArmPlatThread);
#ifdef __CPU_HAS_VFP
		size += KVfpContextSize;
#endif
		__KTRACE_OPT(KTHREAD,Kern::Printf("GetNewThread size=%04x",size));
		pT = (DArmPlatThread*)Kern::AllocZ(size);
		}
	if (!pT)
		return KErrNoMemory;
	// Placement-construct in the (zero-filled) storage.
	new (pT) DArmPlatThread;
	aThread = pT;
	pT->iOwningProcess=this;

#ifdef __CPU_HAS_VFP
	// The VFP context lives immediately after the thread object; seed its
	// first word (the saved FPSCR slot) with the system default mode.
	pT->iNThread.iExtraContext = (TUint8*)pT + sizeof(DArmPlatThread);
	*(TUint32*)(pT->iNThread.iExtraContext) = Arm::VfpDefaultFpScr;
	// Inherit parent VFP FPSCR value if applicable
	if ((TInt)aInfo.iType != (TInt)EThreadInitial)
		{
		if (pT->iOwningProcess == Kern::CurrentThread().iOwningProcess)
			{
			// Only read the live FPSCR if VFP is currently enabled for the
			// creating thread; otherwise the hardware register does not
			// belong to this thread's context.
			if (Arm::FpExc() & VFP_FPEXC_EN)
				{
				*(TUint32*)(pT->iNThread.iExtraContext) = Arm::FpScr() & VFP_FPSCR_MODE_MASK;
				}
			}
		}
#endif

	return KErrNone;
	}
|
|
114 |
|
|
115 |
extern void DumpExcInfo(TArmExcInfo& a);
|
|
116 |
void DumpExcInfoX(TArmExcInfo& a)
	{
	// Dump the raw exception record, then append identification of the
	// thread that was current when the exception was taken.
	DumpExcInfo(a);

	NThread* const currentNThread = NCurrentThread();
	if (currentNThread == NULL)
		{
		Kern::Printf("No current thread");
		return;
		}

	DThread* const currentDThread = Kern::NThreadToDThread(currentNThread);
	if (!currentDThread)
		{
		// Nanokernel thread with no Symbian-kernel thread attached.
		Kern::Printf("Thread N/A, KernCSLocked=%d",NKern::KernelLocked());
		return;
		}

	TFullName fullName;
	currentDThread->TraceAppendFullName(fullName, EFalse);
	Kern::Printf("Thread full name=%S", &fullName);
	Kern::Printf("Thread ID=%d, KernCSLocked=%d",TheCurrentThread->iId,NKern::KernelLocked());
	}
|
|
136 |
|
|
137 |
extern void DumpFullRegSet(SFullArmRegSet& a);
|
|
138 |
extern void GetUndefinedInstruction(TArmExcInfo* /*aContext*/);
|
|
139 |
extern void PushExcInfoOnUserStack(TArmExcInfo*, TInt);
|
|
140 |
extern "C" {
|
|
141 |
extern SFullArmRegSet DefaultRegSet;
|
|
142 |
}
|
|
143 |
|
|
144 |
#ifdef __CPU_HAS_VFP
|
|
145 |
GLREF_C TInt HandleVFPOperation(TAny* aPtr);
|
|
146 |
#endif
|
|
147 |
|
|
148 |
// Second-level ARM exception dispatcher.
// aPtr points at the TArmExcInfo record saved by the first-level vector
// handlers. Tries, in order: demand-paging / RAM-defrag fault repair,
// magic-instruction fixups, the current thread's TExcTrap, VFP lazy
// context switching, the thread's user-side exception handler, and a
// registered kernel event handler; if nothing handles the exception the
// thread is panicked (or the system faulted for critical threads).
void Exc::Dispatch(TAny* aPtr, NThread*)
	{
#ifdef __CPU_ARM_ABORT_MODEL_UPDATED
#error Processors implementing the 'Base Register Updated' Abort Model are no longer supported
#endif

	TArmExcInfo* pR=(TArmExcInfo*)aPtr;
	TInt mode=pR->iCpsr & EMaskMode;	// processor mode at the point of exception

	TBool faultHandled = EFalse;

	// First give the memory model a chance to repair the fault transparently.
#ifdef __DEMAND_PAGING__
	faultHandled |= M::DemandPagingFault(aPtr) == KErrNone;
#endif

	if(!faultHandled)
		faultHandled |= M::RamDefragFault(aPtr) == KErrNone;

	if(faultHandled)
		{
#ifdef __ATOMIC64_USE_SLOW_EXEC__
		if (mode==ESvcMode && pR->iExcCode==EArmExceptionDataAbort && IsMagicAtomic64(pR->iR15))
			{
			// Magic atomic instruction so return to next instruction to stop any
			// writes to memory being executed and ensure interrupts are enabled.
			pR->iR15 += 4;
			pR->iCpsr &= ~KAllInterruptsMask;
			}
#endif
		return;
		}

	if (mode==ESvcMode && pR->iExcCode==EArmExceptionDataAbort && IsMagic(pR->iR15))
		{
		// skip instruction that caused the exception, set the zero flag and place faulted address in r12
		__KTRACE_OPT(KPANIC,DumpExcInfoX(*pR));
		pR->iR15 += 4;
		pR->iCpsr |= ECpuZf;
		pR->iR12 = pR->iFaultAddress;
		return;
		}

	DThread* pT=TheCurrentThread;
	TExcTrap* xt=pT->iExcTrap;
	if (xt)
		{
		__KTRACE_OPT(KPANIC,DumpExcInfoX(*pR));
		// current thread wishes to handle exceptions
		// (if the trap handler returns, fall through to normal processing)
		(*xt->iHandler)(xt,pT,aPtr);
		}

	if (NKern::HeldFastMutex())	// thread held fast mutex when exception occurred
		Exc::Fault(aPtr);

#ifdef __CPU_HAS_VFP
	if (pR->iExcCode==EArmExceptionUndefinedOpcode)
		{
		// Get the undefined instruction
		GetUndefinedInstruction(pR);

		const TUint32 opcode = pR->iFaultAddress;
		TInt cpnum = -1;	// stays -1 unless a coprocessor/NEON instruction is recognised

#ifdef __SUPPORT_THUMB_INTERWORKING
		if (!(pR->iCpsr & ECpuThumb)) {
#endif
		// ARM-state decode.
		// check for coprocessor instructions
		//          10987654321098765432109876543210
		// CDP:     cond1110op1 CRn CRd cp_#op20CRm
		// LDC:     cond110PUNW1Rn  CRd cp_#offset
		// STC:     cond110PUNW0Rn  CRd cp_#offset
		// MRC:     cond1110op11CRn Rd  cp_#op21CRm
		// MCR:     cond1110op10CRn Rd  cp_#op21CRm
		// ext:     cond11000x0xRn  CRd cp_#offset
		// CDP2:    11111110xxxxxxxxxxxxxxxxxxx0xxxx
		// LDC2:    1111110xxxx1xxxxxxxxxxxxxxxxxxxx
		// STC2:    1111110xxxx0xxxxxxxxxxxxxxxxxxxx
		// MRC2:    11111110xxx1xxxxxxxxxxxxxxx1xxxx
		// MCR2:    11111110xxx0xxxxxxxxxxxxxxx1xxxx
		// MRRC:    cond11100101Rn  Rd  cp_#opc CRm
		// MCRR:    cond11100100Rn  Rd  cp_#opc CRm
		//
		// NEON data processing:
		//          1111001xxxxxxxxxxxxxxxxxxxxxxxxx
		// NEON element/structure load/store:
		//          11110100xxx0xxxxxxxxxxxxxxxxxxxx
		//
		// Coprocessor instructions have 2nd hex digit (bits 24-27) C,D,E.
		// The coprocessor number is in bits 8-11.
		// NEON instructions have 1st hex digits F2, F3, or F4x where x is even
		// No coprocessor number, route to cp 10

		TUint32 hex2 = (opcode>>24) & 0x0f;

		if (hex2==0xc || hex2==0xd || hex2==0xe)
			cpnum=(opcode>>8)&0x0f;
#ifdef __CPU_ARMV7
		else if ((opcode>>28)==0xf && (hex2==2 || hex2==3 || (hex2==4 && ((opcode>>20)&1)==0)))
			cpnum=VFP_CPID_S;
#endif

#ifdef __SUPPORT_THUMB_INTERWORKING
		}
#endif
		// NOTE: this 'else' pairs with the Thumb-state test above; it is only
		// compiled when both __SUPPORT_THUMB_INTERWORKING and __CPU_ARMV7 are defined.
#ifdef __CPU_ARMV7
		else
			{
			// Thumb-state decode.
			// Check for coprocessor instructions (thumb mode, so only first halfword)
			//        5432109876543210 5432109876543210
			// CDP:   11101110op1 CRn  CRd cp_#op20CRm
			// LDC:   1110110PUNW1Rn   CRd cp_#offset
			// STC:   1110110PUNW0Rn   CRd cp_#offset
			// MRC:   11101110op11CRn  Rd  cp_#op21CRm
			// MCR:   11101110op10CRn  Rd  cp_#op21CRm
			// CDP2:  11111110xxxxxxxx xxxxxxxxxx0xxxxx
			// LDC2:  1111110xxxx1xxxx xxxxxxxxxxxxxxxx
			// STC2:  1111110xxxx0xxxx xxxxxxxxxxxxxxxx
			// MRC2:  11111110xxx1xxxx xxxxxxxxxx1xxxxx
			// MCR2:  11111110xxx0xxxx xxxxxxxxxx1xxxxx
			// MRRC:  111011100101Rn   Rd  cp_#opc CRm
			// MCRR:  111011100100Rn   Rd  cp_#opc CRm
			//
			// Operations starting 1111 are not currently valid for VFP/NEON
			// but are handled here in case of future development or
			// alternative coprocessors
			//
			// NEON data processing:
			//        111x1111xxxxxxxx xxxxxxxxxxxxxxxx
			// NEON element/structure load/store:
			//        11111001xxx0xxxx xxxxxxxxxxxxxxxx
			//
			// Coprocessor instructions have first hex digit E or F
			// and second C, D or E
			// The coprocessor number is in bits 8-11 of the second halfword
			// NEON instructions have first 2 hex digits EF, FF or F9
			// No coprocessor number, route to cp 10

			const TUint32 hex12 = opcode >> 8;

			if ((hex12 & 0xe0) == 0xe0)
				{
				const TUint32 hex2 = hex12 & 0xf;
				if (hex2 == 0xc || hex2 == 0xd || hex2 == 0xe)
					{
					// Coprocessor number is in the second halfword; fetch it
					// by re-reading at PC+2 with a copy of the exception record.
					TArmExcInfo nextInstruction = *pR;
					nextInstruction.iR15 += 2;
					GetUndefinedInstruction(&nextInstruction);
					cpnum = (nextInstruction.iFaultAddress >> 8) & 0x0f;
					}
				else
					{
					if (hex12 == 0xef || hex12 == 0xf9 || hex12 == 0xff)
						cpnum = VFP_CPID_S;
					}
				}
			}
#endif // __CPU_ARMV7

		if (cpnum >= 0)
			{
			// Recognised coprocessor/NEON instruction: attempt lazy VFP
			// context switch / bounce handling.
			__KTRACE_OPT(KEVENT,Kern::Printf("VFP Instruction %08x", opcode));
			TInt r = HandleVFPOperation(pR);
			if (r==KErrNone)
				return;
			__KTRACE_OPT(KEVENT,Kern::Printf("VFP Instruction returned %d", r));
			}
		}
#endif // __CPU_HAS_VFP

	NKern::LockSystem();
	if (pT->iThreadType==EThreadUser && mode==EUserMode)
		{
		// Offer the exception to the thread's user-side handler.
		// NOTE(review): the exception type is hard-coded to 12 here — presumably
		// a fixed TExcType value for hardware exceptions; confirm against TExcType.
		TExcType type=(TExcType)12;
		if (pT->IsExceptionHandled(type))
			{
			pT->iFlags |= KThreadFlagLastChance;
			NKern::UnlockSystem();

			// tweak context to call exception handler
			PushExcInfoOnUserStack(pR, type);
			pR->iR15 = (TUint32)pT->iOwningProcess->iReentryPoint;
			pR->iR4 = KModuleEntryReasonException;
#ifdef __SUPPORT_THUMB_INTERWORKING
			pR->iCpsr &= ~ECpuThumb;	// re-entry point is ARM code
#endif
			return;
			}
		}
	// Fault system before attempting to signal kernel event handler as going to
	// crash anyway so no point trying to handle the event. Also, stops
	// D_EXC hanging system on crash of critical/permanent thread.
	if (pT->iFlags & (KThreadFlagSystemCritical|KThreadFlagSystemPermanent))
		Exc::Fault(aPtr);
	NKern::UnlockSystem();

	TUint m = DKernelEventHandler::Dispatch(EEventHwExc, pR, NULL);
	if (m & (TUint)DKernelEventHandler::EExcHandled)
		return;

//	__KTRACE_OPT(KPANIC,DumpExcInfoX(*pR));
	DumpExcInfoX(*pR);

	// panic current thread
	K::PanicKernExec(ECausedException);
	}
|
|
353 |
|
|
354 |
// Unrecoverable exception: snapshot the full register state, record the
// exception details in the super page, and bring the system down via
// Kern::Fault(). Never returns.
EXPORT_C void Exc::Fault(TAny* aExcInfo)
	{
#ifdef __SMP__
	TSubScheduler* ss = &SubScheduler();
	if (!ss)
		ss = &TheSubSchedulers[0];	// fall back if sub-scheduler not yet set up
	ss->i_ExcInfo = aExcInfo;
	SFullArmRegSet* a = (SFullArmRegSet*)ss->i_Regs;
	if (!a)
		a = &DefaultRegSet;	// static fallback register set for very early faults
#else
	TheScheduler.i_ExcInfo = aExcInfo;
	SFullArmRegSet* a = (SFullArmRegSet*)TheScheduler.i_Regs;
#endif
	if (aExcInfo)
		{
		// Capture current CPU state, then overlay the registers from the
		// exception record so the dump reflects the faulting context.
		Arm::SaveState(*a);
		Arm::UpdateState(*a, *(TArmExcInfo*)aExcInfo);
		}
	TExcInfo e;
	e.iCodeAddress = (TAny*)a->iN.iR15;
	e.iDataAddress = (TAny*)a->iB[0].iDFAR;	// data fault address register
	e.iExtraData = (TInt)a->iB[0].iDFSR;	// data fault status register
//	__KTRACE_OPT(KPANIC,DumpExcInfoX(*pR));
	DumpFullRegSet(*a);
	TheSuperPage().iKernelExcId = a->iExcCode;
	TheSuperPage().iKernelExcInfo = e;
	Kern::Fault("Exception", K::ESystemException);
	}
|
|
383 |
|
|
384 |
extern "C" void ExcFault(TAny* aExcInfo)
	{
	// C-linkage trampoline so assembler exception stubs can reach Exc::Fault().
	Exc::Fault(aExcInfo);
	}
|
|
388 |
|
|
389 |
// Thread-exit hook: relinquish any VFP ownership this thread holds so a
// re-used thread object starts with a fresh VFP context.
void DArmPlatThread::DoExit2()
	{
#ifdef __CPU_HAS_VFP
	for (TInt cpu = 0; cpu < NKern::NumberOfCpus(); cpu++)
		{
		// Ensure that if this thread object is re-used then it gets a fresh context
		if (Arm::VfpThread[cpu] == &iNThread)
			Arm::VfpThread[cpu] = NULL;
#ifndef __SMP__
		// NOTE(review): this clears FPEXC.EX/EN on every loop iteration regardless
		// of ownership — harmless on unicore where the loop runs once.
		Arm::ModifyFpExc(VFP_FPEXC_EX | VFP_FPEXC_EN, 0); // Disable VFP here for unicore
#endif
		}
#endif
	}
|
|
403 |
|
|
404 |
|
|
405 |
/** Sets the function used to handle bounces from VFP hardware.

	Used by a VFP coprocessor support kernel extension to register its
	bounce handler.

	@param aHandler The bounce handler to install; called when a VFP
	                instruction bounces while VFP is already enabled.

	@publishedPartner
	@released
*/
EXPORT_C void Arm::SetVfpBounceHandler(TVfpBounceHandler aHandler)
	{
	Arm::VfpBounceHandler = aHandler;
	}
|
|
417 |
|
|
418 |
|
|
419 |
/** Sets the default value of FPSCR in the VFP hardware.

	Used by a VFP coprocessor support kernel extension to enable a
	better default mode than RunFast.

	@param aFpScr The FPSCR value new threads inherit by default.
	              No effect on builds without VFP support.

	@publishedPartner
	@released
*/
EXPORT_C void Arm::SetVfpDefaultFpScr(TUint32 aFpScr)
	{
#ifdef __CPU_HAS_VFP
	VfpDefaultFpScr = aFpScr;
#endif
	}
|
|
433 |
|
|
434 |
|
|
435 |
#ifdef __CPU_HAS_VFP
|
|
436 |
|
|
437 |
#ifndef __SMP__
|
|
438 |
extern void DoSaveVFP(void*);
|
|
439 |
#endif
|
|
440 |
extern void DoRestoreVFP(const void*);
|
|
441 |
|
|
442 |
// Handle an undefined-instruction exception identified as a VFP/NEON
// operation: either forward a genuine VFP bounce to the registered
// handler, or perform a lazy VFP context switch to the current thread.
// Returns KErrNone if handled (caller resumes the faulting instruction).
GLDEF_C TInt HandleVFPOperation(TAny* aPtr)
	{
	NThread* pC = NCurrentThread();

	if (Arm::FpExc() & VFP_FPEXC_EN)
		{
		// Coprocessor already enabled so it must be a real exception
		if (Arm::VfpBounceHandler)
			return Arm::VfpBounceHandler((TArmExcInfo*)aPtr);
		else
			return KErrGeneral;
		}

	// Lock the kernel while swapping VFP register ownership.
	NKern::Lock();

	// Enable access for this thread, clear any exceptional condition
	TUint32 oldFpExc = Arm::ModifyFpExc(VFP_FPEXC_EX, VFP_FPEXC_EN);

#ifndef __SMP__
	if (Arm::VfpThread[0] != pC)
		{
		// Only for unicore - SMP explicitly saves the current context and disables VFP
		// when a thread is descheduled in case it runs on a different core next time
		if (Arm::VfpThread[0])
			{
			DoSaveVFP(Arm::VfpThread[0]->iExtraContext);
			Arm::VfpThread[0]->ModifyFpExc(VFP_FPEXC_EN, 0); // Take access away from previous thread
			}
		DoRestoreVFP(pC->iExtraContext); // Restore this thread's context
		Arm::VfpThread[0] = pC;
		}
#else
	const TInt currentCpu = NKern::CurrentCpu();
	if (Arm::VfpThread[currentCpu] != pC)
		{
		DoRestoreVFP(pC->iExtraContext); // Restore this thread's context
		Arm::VfpThread[currentCpu] = pC;
		}
#endif

	// Put FPEXC back how it was in case there was a pending exception, but keep enable bit on
	Arm::SetFpExc(oldFpExc | VFP_FPEXC_EN);
	NKern::Unlock();

	return KErrNone;
	}
|
|
488 |
#endif // __CPU_HAS_VFP
|
|
489 |
|
|
490 |
#ifdef _DEBUG
|
|
491 |
extern "C" void __FaultIpcClientNotNull()
|
|
492 |
{
|
|
493 |
K::Fault(K::EIpcClientNotNull);
|
|
494 |
}
|
|
495 |
#endif
|