// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\vectors.cia
//
//

#define __INCLUDE_NTHREADBASE_DEFINES__
#include <e32cia.h>
#include <arm.h>

void FastMutexNestAttempt();
void FastMutexSignalError();
extern "C" void ExcFault(TAny*);

#ifdef __CPU_HAS_MMU
#define __USE_CP15_FAULT_INFO__
#endif

#ifdef _DEBUG
#define __CHECK_LOCK_STATE__
#endif

//#define __FAULT_ON_FIQ__

#ifdef __CHECK_LOCK_STATE__
// Check that the kernel is unlocked, no fast mutexes are held and that the thread is not in a
// critical section. Called when returning to user mode.
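// If a check fails the code below deliberately crashes: it writes to the invalid address
// 0xdd000001, or (in badLockState) switches to mode_abt (0xd7) with interrupts masked and
// writes to 0xdd000003, so the resulting abort lands in the post-mortem/crash debugger.
// (This is a reading of the stores below, not an authoritative description.)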
|
__NAKED__ void CheckLockState()
    {
    asm("stmfd sp!, {r14}");
    asm("ldr r12, __TheScheduler ");
    asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("cmp r14, #0 ");
    asm("movne r12, #0xdd000000 ");
    asm("strne r12, [r12, #1] ");
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
    asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
    asm("cmp r14, #0 ");
    __CPOPRET(eq, "");
    asm("badLockState: ");
    asm("mov r12, #0xd7 ");
    asm("msr cpsr, r12 ");
    asm("mov r12, #0xdd000000 ");
    asm("str r12, [r12, #3] ");
    }
#endif

__ASSERT_COMPILE(EUserModeCallbackRun == 0);

__NAKED__ void CallUserModeCallbacks()
    {
    // called with interrupts disabled
    // preserves r0 and r1 in addition to the usual registers
    // leaves current thread in r2
    // the vast majority of times this is called with zero or one callback pending
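    // Rough C-style sketch of the loop implemented below (illustrative only; the
    // TUserModeCallback field names iNext/iFunc, the callback signature and the helper
    // calls are assumptions, not taken from this file):
    //   while (TUserModeCallback* cb = aThread->iUserModeCallbacks)
    //       {
    //       aThread->iCsCount = 1;                    // enter critical section
    //       aThread->iUserModeCallbacks = cb->iNext;  // dequeue first callback
    //       cb->iNext = KUserModeCallbackUnqueued;
    //       EnableAllInterrupts();
    //       cb->iFunc(cb, EUserModeCallbackRun);      // run the callback
    //       DisableAllInterrupts();
    //       LeaveCriticalSection();                   // NKern::ThreadLeaveCS() only if a CS function is pending
    //       }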
|

    asm(".global callUserModeCallbacks ");
    asm("callUserModeCallbacks: ");

    asm("ldr ip, __TheScheduler ");
    asm("ldr r2, [ip, #%a0]" : : "i" _FOFF(TScheduler, iCurrentThread));

    asm("callUserModeCallbacks2: ");

#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("mrc p15, 0, r12, c3, c0, 0 ");
    asm("tst r12, #0xc0000000 ");
    asm("cdpne p15, 0, c0, c0, c0, 0 ");
#endif
#ifdef __CHECK_LOCK_STATE__
    asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread,iCsCount));
    asm("cmp ip, #0 ");
    asm("bne badLockState ");
#endif

    asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserModeCallbacks));
    asm("teq ip, #0");
    asm("bne 1f");
    __JUMP(,lr);

    asm("1: ");
    asm("stmfd sp!, {r0-r2, r4-r11, lr}");
    asm("movs r4, r3");
    // if r3 != 0 it is the user context type to set the thread to
    asm("strneb r3, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));

    // Remove first callback and enter critical section - we can just set iCsCount to 1 as we are
    // guaranteed not to be in a critical section already
    asm("ldmia ip, {r1, r3} "); // HARDCODED: TUserModeCallback layout
    asm("mov r0, #1");
    asm("str r0, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));
    asm("str r1, [r2, #%a0]" : : "i" _FOFF(NThread,iUserModeCallbacks));

    // Re-enable interrupts and call callback
    SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
    asm("mov r1, #%a0 " : : "i" ((TInt)KUserModeCallbackUnqueued));
    asm("str r1, [ip, #%a0]" : : "i" _FOFF(TUserModeCallback, iNext));
    asm("mov r0, ip");
    asm("mov r1, #0 "); // 0 == EUserModeCallbackRun
    __JUMPL(3);

    SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);

    asm("movs r3, r4");
    // Leave critical section, avoid calling NKern::ThreadLeaveCS unless we have to
    asm("ldmfd sp!, {r0-r2, r4-r11, lr}");
    // reset user context type to undefined if r3 != 0
    asm("mov ip, #%a0" : : "i" (NThread::EContextUndefined));
    asm("strneb ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));
    asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsFunction));
    asm("teq ip, #0");
    asm("streq ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));
    asm("beq callUserModeCallbacks2 ");

    asm("leaveCS:");
    asm("sub sp, sp, #48 ");
    SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
    asm("bl " CSM_ZN5NKern13ThreadLeaveCSEv);
    SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);
    asm("ldmfd sp!, {r0-r2, r4-r11, lr}");
    asm("b callUserModeCallbacks2 ");
    }

/***************************************************************************
 * SWI Handler
 ***************************************************************************/
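// A note on the dispatch below (derived from the code, not an authoritative spec): the SWI
// immediate (bits 0-23 of the opcode) encodes the executive call. "movs r12, r12, lsl #9"
// shifts bit 23 into the carry flag and leaves 512*number in r12, so BCC (bit 23 clear)
// takes the slow exec path while fast execs are dispatched directly with all interrupts
// off; "r12, lsr #7" then gives number*4, the byte offset into the fast exec table. A fast
// exec number of zero (the EQ case) is the special Exec::WaitForAnyRequest call.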
|

extern "C" __NAKED__ void __ArmVectorSwi()
    {
    // IRQs disabled, FIQs enabled here
    asm("ldr r12, [lr, #-4] "); // get SWI opcode
    asm("stmfd sp!, {r11, lr} "); // save return address, r11 for 8 byte align
    USER_MEMORY_GUARD_ON_IF_MODE_USR(r11);
    asm("ldr r11, __TheScheduler ");
    asm("adr lr, fast_swi_exit ");
    asm("movs r12, r12, lsl #9 "); // 512*SWI number into r12
    asm("bcc slow_swi "); // bit 23=0 for slow/unprot
    asm("ldr r1, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
    asm("beq wait_for_any_request "); // special case for Exec::WaitForAnyRequest
#ifdef __CPU_ARM_HAS_CPS
    asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
    CPSIDIF; // all interrupts off
    asm("ldr r3, [r2], r12, lsr #7 "); // r3=limit, r2->dispatch table entry
    asm("ldr r2, [r2] "); // r2->kernel function
    asm("cmp r3, r12, lsr #9 "); // r3-SWI number
    __JUMP(hi, r2); // if SWI number valid, call kernel function
#else
    SET_INTS(r2, MODE_SVC, INTS_ALL_OFF);
    asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
    asm("ldr r3, [r2], r12, lsr #7 "); // r3=limit, r2->dispatch table entry
    asm("cmp r3, r12, lsr #9 "); // r3-SWI number
    asm("ldrhi pc, [r2] "); // if SWI number valid, call kernel function
#endif
    asm("mvn r12, #0 "); // put invalid SWI number into r12
    asm("b slow_swi "); // go through slow SWI routine to call invalid SWI handler

    asm("fast_swi_exit: ");
#ifdef __CHECK_LOCK_STATE__
    asm("mrs r12, spsr ");
    asm("tst r12, #0x0f ");
    asm("bleq " CSM_Z14CheckLockStatev);
#endif
    USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11);
    ERRATUM_353494_MODE_CHANGE(,r11);
    asm("ldmfd sp!, {r11, pc}^ "); // return and restore cpsr

    asm("slow_swi: "); // IRQs off, FIQs on here
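    // Layout assumed by the lookup below: iSlowExecTable points at the first
    // {flags, function} pair (8 bytes per call); the three words at negative offsets
    // are the call-number limit (-12), the invalid-call handler (-8) and the
    // preprocess handler (-4).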
|
    asm("stmfd sp!, {r3-r10} "); // save nonvolatile registers, r3 for 8 byte align
    asm("ldr r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r9->current thread
    SET_INTS(lr, MODE_SVC, INTS_ALL_ON); // all interrupts on
    asm("mov r10, r11 "); // r10->scheduler
    asm("ldr r4, [r9, #%a0]" : : "i" _FOFF(NThread,iSlowExecTable));
    asm("mrs r11, spsr "); // spsr_svc into r11
    asm("adr lr, slow_swi_exit ");
    asm("add r6, r4, r12, lsr #6 "); // r6->dispatch table entry
    asm("ldr r5, [r4, #-12] "); // r5=limit
    SET_INTS_1(r7, MODE_SVC, INTS_ALL_OFF);
    asm("cmp r5, r12, lsr #9 "); // r5-SWI number
    asm("ldmhiia r6, {r5,r6} "); // if SWI number OK, flags into r5, function addr into r6
    asm("ldrls pc, [r4, #-8] "); // if SWI number invalid, call invalid handler
    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask)); // extra arguments needed?
    asm("addne r2, sp, #4 "); // if so, point r2 at saved registers on stack
    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagClaim)); // claim system lock?
    asm("beq slow_swi_no_wait "); // skip if not

    SET_INTS_2(r7, MODE_SVC, INTS_ALL_OFF); // interrupts off
#ifdef _DEBUG
    asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
    asm("cmp r12, #0 ");
    asm("bne " CSM_Z20FastMutexNestAttemptv); // debug check that current thread doesn't already hold a fast mutex
#endif
    asm("ldr r12, [r10, #%a0]!" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // r12=iLock.iHoldingThread
    SET_INTS_1(r7, MODE_SVC, INTS_ALL_ON);
    asm("cmp r12, #0 "); // is system lock already held?
    asm("bne ss_fast_mutex_held "); // branch if it is
    asm("ss_fast_mutex_obtained: ");
    asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=&iLock
    asm("str r9, [r10], #-%a0" : : "i" _FOFF(TScheduler,iLock)); // iLock.iHoldingThread=current thread, r10->scheduler
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
    asm("cmp r12, #0");
    asm("bne syslock_trace_wait");
    asm("syslock_trace_wait_done:");
#endif
    SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON); // all interrupts on

    asm("slow_swi_no_wait: ");
    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess)); // preprocess (handle lookup)? can use r4, r7, r8, r12, r0
    asm("mov lr, pc ");
    asm("ldrne pc, [r4, #-4] "); // call preprocess handler if required
    asm("mov lr, pc ");
    __JUMP(,r6); // call exec function, preserve r5,r11 if release syslock not required
                 // preserve r5,r9,r10,r11 if release required
    asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagRelease)); // release system lock?
    asm("beq slow_swi_exit "); // skip if not

    SET_INTS(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts
#ifdef _DEBUG
    asm("add r8, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
    asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
    asm("cmp r12, r8 ");
    asm("bne " CSM_Z20FastMutexSignalErrorv); // debug check that current thread holds system lock
#endif
#ifdef BTRACE_FAST_MUTEX
    asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
    asm("cmp r12, #0");
    asm("bne syslock_trace_signal");
    asm("syslock_trace_signal_done:");
#endif
    asm("mov r12, #0 ");
    asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // iLock.iHoldingThread=NULL
    asm("str r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=NULL
    asm("ldr r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting)); // r3=iLock.iWaiting
    asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting)); // iLock.iWaiting=0
    SET_INTS_1(r8, MODE_SVC, INTS_ALL_ON);
    asm("cmp r3, #0 "); // check waiting flag
    asm("bne ss_signal_check "); // branch if set
    asm("ss_signal_done: ");
    SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON); // otherwise reenable interrupts

    asm("slow_swi_exit: ");
#ifdef __CHECK_LOCK_STATE__
    asm("tst r11, #0x0f ");
    asm("bleq " CSM_Z14CheckLockStatev);
#endif
    SET_INTS(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts
    asm("msr spsr, r11 "); // restore spsr_svc
    asm("tst r11, #0x0f ");
    asm("mov r3, #0 ");
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
    asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround
                 // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
    asm("bleq callUserModeCallbacks "); // call user-mode callbacks
    USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11);
    ERRATUM_353494_MODE_CHANGE(,r11);
    asm("ldmfd sp!, {r3-r11,pc}^ "); // return from EXEC function

    // Come here if we need to wait for the system lock
    // r9->current thread, r10=&iLock, r12=iLock.iHoldingThread
    asm("ss_fast_mutex_held: ");
    asm("mov r8, #1 ");
    asm("str r8, [r10, #%a0]" : : "i" (_FOFF(TScheduler,iKernCSLocked)-_FOFF(TScheduler,iLock))); // lock the kernel
    SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON); // enable interrupts
    asm("str r8, [r10, #4] "); // iWaiting=1
    asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // current thread->iWaitFastMutex=&iLock
    asm("stmfd sp!, {r0-r3} "); // save exec call arguments
    asm("mov r0, r12 "); // parameter for YieldTo
    ASM_DEBUG1(NKFMWaitYield,r0);
    asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase); // yield to the mutex holding thread
    // will not return until the mutex is free
    // on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
    asm("str r1, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // iWaitFastMutex=NULL
    asm("ldmfd sp!, {r0-r3} "); // retrieve exec call arguments
    asm("b ss_fast_mutex_obtained "); // branch back to main code path

    // Come here if we need to reschedule after releasing the system lock
    // kernel unlocked, interrupts enabled, r0 contains return value from Exec call
    // r9->current thread, r10=&TheScheduler, r3=1, r8=0x13
    asm("ss_signal_check: ");
    asm("str r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel (assumes iWaiting always 0 or 1)
    SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON); // reenable interrupts
    asm("strb r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
    asm("ldr r3, [r9, #%a0]" : : "i" _FOFF(NThread,iCsFunction)); // r3=current thread->iCsFunction
    asm("ldr r2, [r9, #%a0]" : : "i" _FOFF(NThread,iCsCount)); // r2=current thread->iCsCount
    asm("mov r4, r0 "); // save return value
    asm("cmp r3, #0 "); // outstanding CS function?
    asm("beq 2f "); // branch if not
    asm("cmp r2, #0 "); // iCsCount!=0 ?
    asm("moveq r0, r9 "); // if iCsCount=0, DoCsFunction()
    asm("bleq " CSM_ZN11NThreadBase12DoCsFunctionEv);
    asm("2: ");
    asm("bl " CSM_ZN10TScheduler10RescheduleEv); // reschedule to allow waiting thread in
    asm("mov r0, r4 "); // recover return value
    asm("b ss_signal_done "); // branch back to main code path

#ifdef BTRACE_FAST_MUTEX
    asm("syslock_trace_wait:");
    asm("ldr r12, [sp,#9*4]"); // r12 = return address from SWI
    asm("mov r8, r3"); // save r3
    asm("stmdb sp!,{r0-r2,r12}"); // 4th item on stack is PC value for trace
    asm("ldr r0, fmwait_trace_header");
    asm("mov r2, r9"); // current thread
    asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
    asm("mov lr, pc");
    asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
    asm("ldmia sp!,{r0-r2,r12}");
    asm("mov r3, r8"); // restore r3
    asm("b syslock_trace_wait_done");

    asm("syslock_trace_signal:");
    asm("ldr r12, [sp,#9*4]"); // r12 = return address from SWI
    asm("stmdb sp!,{r0-r2,r12}"); // 4th item on stack is PC value for trace
    asm("ldr r0, fmsignal_trace_header");
    asm("mov r2, r9"); // current thread
    asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
    asm("mov lr, pc");
    asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
    asm("ldmia sp!,{r0-r2,r12}");
    asm("b syslock_trace_signal_done");

    asm("fmsignal_trace_header:");
    asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexSignal << BTrace::ESubCategoryIndex*8)) );

    asm("fmwait_trace_header:");
    asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexWait << BTrace::ESubCategoryIndex*8)) );
#endif

    }

/***************************************************************************
 * IRQ Postamble
 * This routine is called after the IRQ has been dispatched
 * spsr_irq, r4-r11 are unmodified
 * spsr_irq,r0-r3,r12,return address are on the top of the IRQ stack
 ***************************************************************************/
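// In outline (a reading of the code below, not an authoritative description): if the
// interrupt hit mode_usr or mode_svc with the kernel unlocked and a DFC or reschedule is
// pending, the kernel is locked, the saved words are moved from the IRQ stack to the SVC
// stack and TScheduler::Reschedule() is called before returning; otherwise the handler
// returns directly from the IRQ stack, emitting a BTrace CPU-usage record if enabled.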
|

extern "C" __NAKED__ void __ArmVectorIrq()
    {
    // FIQs enabled here but not IRQs
    asm("ldr r1, __TheScheduler ");
    asm("mrs r0, spsr "); // check interrupted mode
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("add r12, sp, #32 "); // r12=sp_irq+8 words
#else
    asm("add r12, sp, #24 "); // r12=sp_irq+6 words
#endif
    asm("and r2, r0, #0x1f ");
    asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // r3=KernCSLocked
    asm("cmp r2, #0x10 "); // check for mode_usr
    asm("cmpne r2, #0x13 "); // or mode_svc
    asm("cmpeq r3, #0 "); // and then check if kernel locked
    asm("bne IrqExit0 "); // if wrong mode or locked, return immediately
    SET_INTS(r2, MODE_IRQ, INTS_ALL_OFF); // disable FIQs before we check for reschedule
    asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag)); // r2=DfcPendingFlag/RescheduleNeededFlag
    asm("add r3, r3, #1 ");
    SET_MODE_1(lr, MODE_SVC, INTS_ALL_ON);
    asm("cmp r2, #0 "); // check if reschedule needed
    asm("beq IrqExit0 "); // if not, return immediately
    asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel
    SET_MODE_2(lr, MODE_SVC, INTS_ALL_ON); // mode_svc, interrupts back on

    asm("ldmdb r12!, {r1-r3} "); // move saved registers (r0-r3,r12,pc) over to mode_svc stack
    asm("stmfd sp!, {r1-r3} ");
    asm("ldmdb r12!, {r1-r3} ");
    asm("stmfd sp!, {r1-r3} ");
    asm("stmfd sp!, {r0,lr} "); // store lr_svc and interrupted cpsr on current mode_svc stack
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldmdb r12, {r1-r2} ");
    asm("stmfd sp!, {r1-r2} "); // move user guard over to mode_svc stack
#endif

    SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
    SET_MODE(lr, MODE_IRQ, INTS_IRQ_OFF); // mode_irq, IRQs off
    asm("add sp, r12, #24 "); // restore mode_irq stack balance
    SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON); // back to mode_svc, IRQs on

    // reschedule - this also switches context if necessary
    // enter this function in mode_svc, interrupts on, kernel locked
    // exit this function in mode_svc, all interrupts off, kernel unlocked
    asm("irq_do_resched: ");
    asm("bl " CSM_ZN10TScheduler10RescheduleEv);
    asm(".global irq_resched_return ");
    asm("irq_resched_return: ");

    SET_MODE(r2, MODE_SVC, INTS_ALL_OFF); // all interrupts off
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldr r1, [sp, #8] "); // get interrupted cpsr, don't unbalance stack
#else
    asm("ldr r1, [sp] "); // get interrupted cpsr, don't unbalance stack
#endif
#ifdef __CHECK_LOCK_STATE__
    asm("mov r2, r12 ");
    asm("tst r1, #0x0f ");
    asm("bleq " CSM_Z14CheckLockStatev);
    asm("mov r12, r2 ");
#endif
    asm("tst r1, #0x0f ");
    asm("mov r3, #%a0 " : : "i" (NThread::EContextUserIntrCallback));
    asm("bleq callUserModeCallbacks "); // call user-mode callbacks

#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldr r1, [sp], #8 ");
    USER_MEMORY_GUARD_RESTORE(r1,lr);
#endif
    asm("ldmfd sp!, {r1, lr} "); // restore lr_svc
    asm("add sp, sp, #24 "); // restore mode_svc stack balance
    asm("mov r12, sp "); // r12=address of remaining saved registers

    SET_MODE(r2, MODE_IRQ, INTS_ALL_OFF); // back into mode_irq, all interrupts off

    asm("msr spsr, r1 "); // interrupted cpsr into spsr_irq
    ERRATUM_353494_MODE_CHANGE(,r12);
    asm("ldmdb r12, {r0-r3,r12,pc}^ "); // return from interrupt

    asm("IrqExit0: ");
#ifdef __CHECK_LOCK_STATE__
    asm("tst r0, #0x0f ");
    asm("bleq " CSM_Z14CheckLockStatev);
#endif
    asm("IrqExit1: "); // entry point for __ArmVectorIrqPostambleNoResched()
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldr lr, [sp], #8 ");
    USER_MEMORY_GUARD_RESTORE(lr,r12);
#endif
#ifdef BTRACE_CPU_USAGE
    asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
    asm("mov r0, #%a0" : : "i" ((TInt)4 ) );
    asm("add r0, r0, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8)) );
    asm("cmp r2, #0");
    asm("movne lr, pc");
    asm("ldrne pc, [r1,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
#endif
    ERRATUM_353494_MODE_CHANGE(,r12);
    asm("ldmfd sp!, {r0-r3,r12,pc}^ "); // return from interrupt
    }

/***************************************************************************
 * IRQ Postamble which will not reschedule (can be returned to by co-resident OS).
 * This routine is called after the IRQ has been dispatched
 * spsr_irq, r4-r11 are unmodified
 * spsr_irq,r0-r3,r12,return address are on the top of the IRQ stack
 ***************************************************************************/

extern "C" EXPORT_C __NAKED__ void __ArmVectorIrqPostambleNoResched()
    {
    // FIQs enabled here but not IRQs
    asm("ldr r1, __TheScheduler ");
    asm("b IrqExit1 ");
    }


/***************************************************************************
 * FIQ Postamble
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack
 ***************************************************************************/
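// Same idea as the IRQ postamble, but only the return address (plus the saved DACR when
// user memory guards are enabled) is on the FIQ stack, so r1-r3 are first saved there and
// the full frame is rebuilt on the SVC stack only when a reschedule is actually needed.
// (Again a reading of the code below rather than an authoritative description.)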
|

extern "C" __NAKED__ void __ArmVectorFiq()
    {
#ifdef __FAULT_ON_FIQ__
    asm(".word 0xe7f10f10 ");
#endif
    // IRQs and FIQs disabled here
    // r0-r7 are unaltered from when FIQ occurred
    asm("ldr r9, __TheScheduler ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldr r12, [sp], #4 ");
#endif
    asm("mrs r8, spsr "); // check interrupted mode
    asm("and r10, r8, #0x1f ");
    asm("cmp r10, #0x10 "); // check for mode_usr
    asm("ldr r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("cmpne r10, #0x13 "); // or mode_svc
    asm("ldreq r10, [r9, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
    asm("cmpeq r11, #0 "); // and check if kernel locked
    asm("bne FiqExit0 "); // if wrong mode or kernel locked, return immediately
    asm("cmp r10, #0 "); // check if reschedule needed
    asm("beq FiqExit0 "); // if not, return from interrupt
    // we interrupted mode_usr or mode_svc, kernel unlocked, reschedule needed
    asm("add r11, r11, #1 ");
    asm("str r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel
    asm("stmfd sp!, {r1-r3} "); // save interrupted r1-r3 on FIQ stack
    asm("mov r1, r8 "); // r1=interrupted cpsr
    asm("mov r3, sp "); // r3 points to saved registers
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("mov r2, r12 "); // saved DACR into R2
#endif
    SET_MODE(lr, MODE_SVC, INTS_ALL_ON); // switch to mode_svc, IRQs and FIQs back on
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("str r2, [sp, #-40]! "); // save DACR and leave room for spare, cpsr, lr_svc, r0-r3, r12, pc
    asm("ldr r2, [r3, #12] "); // r2=return address
    asm("str r12, [sp, #32] "); // save r12 on mode_svc stack
    asm("str r2, [sp, #36] "); // save return address on mode_svc stack
    asm("add r12, sp, #8 ");
#else
    asm("ldr r2, [r3, #12] "); // r2=return address
    asm("sub sp, sp, #32 "); // make room for saved registers on mode_svc stack
    asm("str r12, [sp, #24] "); // save r12 on mode_svc stack
    asm("str r2, [sp, #28] "); // save return address on mode_svc stack
    asm("mov r12, sp ");
#endif
    asm("stmia r12!, {r1,lr} "); // save interrupted cpsr and lr_svc
    asm("ldmia r3, {r1,r2,lr} "); // retrieve original r1-r3 from mode_fiq stack
    asm("stmia r12, {r0-r2,lr} "); // save original r0-r3 - saved register order is now cpsr,lr_svc,r0-r3,r12,pc
    SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
    SET_MODE(lr, MODE_FIQ, INTS_ALL_OFF); // mode_fiq, IRQs and FIQs off
    asm("add sp, r3, #16 "); // restore mode_fiq stack balance
    SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON); // back to mode_svc, IRQs on
    asm("adr lr, irq_resched_return ");
    asm("b " CSM_ZN10TScheduler10RescheduleEv); // do reschedule and return to irq_resched_return

    asm("FiqExit0:"); // also entry point for __ArmVectorFiqPostambleNoResched()
    USER_MEMORY_GUARD_RESTORE(r12,lr);
#ifndef BTRACE_CPU_USAGE
    ERRATUM_353494_MODE_CHANGE(,r11);
    asm("ldmfd sp!, {pc}^ "); // return from interrupt
#else
    asm("ldrb r8, [r9,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
    asm("mov r10, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EFiqEnd<<BTrace::ESubCategoryIndex*8)) );
    asm("adr lr, FiqTraceExit0");
    asm("cmp r8, #0");
    ERRATUM_353494_MODE_CHANGE(eq,r8);
    asm("ldmeqfd sp!, {pc}^ "); // return from interrupt if trace not enabled
    asm("stmfd sp!, {r0-r3} ");
    asm("add r0, r10, #%a0" : : "i" ((TInt)4 ) );
    asm("ldr pc, [r9,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
    asm("FiqTraceExit0:");
    ERRATUM_353494_MODE_CHANGE(,r3);
    asm("ldmfd sp!, {r0-r3,pc}^ "); // return from interrupt
#endif

    asm("__TheScheduler: ");
    asm(".word TheScheduler ");
    }

/***************************************************************************
 * FIQ Postamble which will not reschedule (can be returned to by co-resident OS).
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack
 ***************************************************************************/

extern "C" EXPORT_C __NAKED__ void __ArmVectorFiqPostambleNoResched()
    {
#ifdef __FAULT_ON_FIQ__
    asm(".word 0xe7f10f10 ");
#endif
    // IRQs and FIQs disabled here
    // r0-r7 are unaltered from when FIQ occurred
    asm("ldr r9, __TheScheduler ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    asm("ldr r12, [sp], #4 ");
#endif
    asm("b FiqExit0 ");
    }


extern "C" __NAKED__ void __ArmVectorAbortData()
//
// Data abort
//
    {
#if defined(__CPU_CORTEX_A8__) && (!defined(__CPU_ARM_A8_ERRATUM_447862_FIXED) || !defined(__CPU_ARM_A8_ERRATUM_451027_FIXED))
    ARM_DMBSH; // ARM Cortex-A8 erratum 447862/451027 workaround
#endif
    asm("sub lr, lr, #8"); // lr now points to aborted instruction
    asm("stmfd sp!, {r0-r4,r12,lr}"); // save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
    CLREX // reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
    STREX(12,0,13); // dummy STREX to reset exclusivity monitor
#endif
    asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionDataAbort));
    // generic exception handler
    // come here with r1=exception code, lr points to aborted instruction, r0-r4,r12,lr saved
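    // The shared path below, in outline (a reading of the code, not an authoritative
    // description): fault unless the exception came from mode_usr or mode_svc with the
    // kernel unlocked; lock the kernel and switch to mode_svc; check the supervisor stack
    // is valid with enough headroom; build a 23-word exception frame on it (including the
    // CP15 fault address/status where available); unlock the kernel, then call the
    // thread's iExceptionHandler with that frame before restoring and returning.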
|
    asm("handle_exception: ");
    asm("mrs r0, spsr "); // r0=value of cpsr when abort occurred

    asm("handle_exception2: ");
    asm("mrs r12, cpsr ");
    asm("and r3, r0, #0x1f "); // r3=processor mode when abort occurred
    asm("bic r12, r12, #0xc0 ");
    asm("cmp r3, #0x10 "); // aborted in user mode?
    asm("cmpne r3, #0x13 "); // if not, aborted in mode_svc?
    asm("bne fatal_exception_mode "); // if neither, fault
    asm("msr cpsr, r12 "); // reenable interrupts - rescheduling disabled by mode_abt/mode_und
    asm("ldr r2, __TheScheduler ");
    asm("mov r3, sp "); // r3 points to saved registers
    asm("ldr r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("cmp r4, #0 "); // exception with kernel locked?
    asm("bne fatal_exception_mode "); // if so, fault
    asm("add r4, r4, #1 "); // lock the kernel
    asm("str r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
    asm("mov r4, #0x13 ");
    asm("msr cpsr, r4 "); // mode_svc, interrupts on, kernel locked

    asm("ldr r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
    asm("tst r0, #0x0f "); // check if exception in mode_usr
    asm("mov r2, #%a0 " : : "i" ((TInt)NThread::EContextException));
    asm("streqb r2, [r4, #%a0]" : : "i" _FOFF(NThread,iSpare3)); // if so, set iUserContextType = EContextException
    asm("add r4, r4, #%a0" : : "i" _FOFF(NThread,iStackBase));
    asm("ldmia r4, {r2,r4} "); // r2=supervisor stack area base, r4=size
    asm("subs r2, sp, r2 "); // r2=amount of mode_svc stack remaining
    asm("blo fatal_exception_stack "); // if stack pointer invalid, fault
    asm("cmp r2, r4 ");
    asm("bhi fatal_exception_stack ");
    asm("cmp r2, #128 "); // check enough stack to handle exception
    asm("blo fatal_exception_stack "); // if not, fault

    // At this point we are in mode_svc with interrupts enabled and the kernel locked.
    // We know the supervisor stack is valid and has enough free space to store the exception info.
    // Registers: R0=aborted cpsr, R1=exception type, R2,R4 scratch, R3 points to saved registers
    // on mode_abt or mode_und stack, R12 holds mode of exception (mode_abt or mode_und).
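    // The 92-byte (23-word) frame built below is, from the lowest address up: cpsr,
    // exception id, sp_svc, r4-r11, lr_svc, fault address, fault status, spsr_svc,
    // sp_usr, lr_usr, r0-r3, r12, pc. This is the block later passed to the thread's
    // exception handler (presumably as a TArmExcInfo - an assumption, not stated here).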
|

    asm("ldr r4, [r3, #16] "); // restore original r4
    asm("mov r2, sp "); // r2=sp_svc when abort occurred
    asm("sub sp, sp, #92 "); // push 23 words onto mode_svc stack
    asm("stmia sp, {r0-r2,r4-r11,lr} "); // save cpsr, exc id, sp_svc, r4-r11, lr_svc
    asm("ldmia r3!, {r4-r10} "); // get registers from mode_abt or mode_und stack
    asm("stmdb r2!, {r4-r7,r9,r10} "); // transfer saved registers from exception stack except r4
    asm("stmdb r2, {r13,r14}^ "); // save sp_usr and lr_usr
    asm("sub r2, r2, #20 ");

    // Set r0 = fault address and r1 = fault status.
    // For prefetch aborts use IFAR if it exists otherwise use the return address.
#ifdef __USE_CP15_FAULT_INFO__
    asm("cmp r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
#ifdef __CPU_ARM_HAS_SPLIT_FSR
    asm("mrcne p15, 0, r1, c5, c0, 0"); // r1 = data fault status
    asm("mrcne p15, 0, r0, c6, c0, 0"); // r0 = DFAR fault address
    asm("mrceq p15, 0, r1, c5, c0, 1"); // r1 = instruction fault status
#ifdef __CPU_ARM_HAS_CP15_IFAR
    asm("mrceq p15, 0, r0, c6, c0, 2"); // r0 = IFAR fault address
#else
    asm("moveq r0, r10"); // r0 = return address.
#endif // __CPU_ARM_HAS_CP15_IFAR
#else
    asm("mrcne p15, 0, r0, c6, c0"); // r0 = fault address
    asm("moveq r0, r10"); // r0 = return address.
    asm("mrc p15, 0, r1, c5, c0"); // r1 = fault status
#endif // __CPU_ARM_HAS_SPLIT_FSR
#endif // __USE_CP15_FAULT_INFO__

    asm("mrs r3, spsr "); // r3=spsr_svc
    asm("stmia r2, {r0,r1,r3} "); // save these
    asm("msr cpsr, r12 "); // back into exception mode
    asm("add sp, sp, #28 "); // restore exception stack balance
    asm("mov r5, #0x13 ");
    asm("msr cpsr, r5 "); // back into mode_svc

    // Now we can unlock the kernel and process the exception
    asm("bl " CSM_ZN10TScheduler10RescheduleEv);
    asm("msr cpsr, r5 "); // enable interrupts

    // call the exception dispatcher, r3 is the current thread
    asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThread,iHandlers));
    asm("mov r1, r3 ");
    asm("mov r0, sp "); // r0 points to saved exception information
    asm("sub sp, sp, #4 "); // make room for r0
    asm("bic sp, sp, #4 "); // align stack to 8 byte boundary
    asm("str r0, [sp] "); // save original stack pointer

    USER_MEMORY_GUARD_ON(,r11,lr);
    asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(SNThreadHandlers,iExceptionHandler));
    asm("mov lr, pc ");
    __JUMP(,r12); // call exception handler
    USER_MEMORY_GUARD_RESTORE(r11,lr);
    asm("ldr sp, [sp, #0] "); // restore stack pointer

    // return from exception
    asm("ldr r0, __TheScheduler ");
    asm("mov r3, sp ");
    asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
    asm("ldr r0, [r3], #12 "); // r0=cpsr, skip exc id and sp_svc
    asm("ldmfd r3!, {r4-r11,lr} "); // restore r4-r11 and lr_svc
    asm("ldr r12, [r3, #8]! "); // skip fault address and fault status, r12=spsr_svc
    asm("ldmib r3, {r13,r14}^ "); // restore sp_usr and lr_usr
    asm("add r1, r3, #12 "); // r1 points to saved r0-r3,r12,pc
    asm("mov r3, #0xd3 ");
    asm("msr cpsr, r3 "); // mode_svc, all interrupts off
    asm("msr spsr, r12 "); // restore spsr_svc
#ifdef __CHECK_LOCK_STATE__
    asm("tst r0, #0x0f ");
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
    asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround
    asm("nop "); // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
    asm("bleq " CSM_Z14CheckLockStatev);
#endif
    asm("tst r0, #0x0f "); // check if exception in mode_usr
#ifdef __USER_MEMORY_GUARDS_ENABLED__
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
    asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround
    asm("nop "); // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
    asm("bne 1f ");
    USER_MEMORY_GUARD_ON(,lr,r12);
    asm("tst lr, #0xc0000000 "); // user memory enabled?
    asm("adrne lr, 2f "); // yes - enable it after callbacks
    asm("adreq lr, 1f "); // no - leave it disabled after callbacks
    asm("mov r3, #0 ");
    asm("b callUserModeCallbacks2 "); // call user-mode callbacks
    asm("2: ");
    USER_MEMORY_GUARD_OFF(,lr,lr);
    asm("1: ");
#else
    asm("mov r3, #0 ");
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
    asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround
                 // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
    asm("bleq callUserModeCallbacks2 "); // call user-mode callbacks
#endif
    asm("tst r0, #0x0f "); // check if exception in mode_usr
    asm("mov r3, #%a0 " : : "i" ((TInt)NThread::EContextUndefined));
    asm("streqb r3, [r2, #%a0]" : : "i" _FOFF(NThread,iSpare3)); // if so, set iUserContextType = EContextUndefined
    asm("add sp, r1, #24 "); // restore mode_svc stack balance
    asm("mov r2, #0xd7 ");
    asm("msr cpsr, r2 "); // mode_abt, all interrupts off
    asm("msr spsr, r0 "); // spsr_abt=aborted cpsr
    ERRATUM_353494_MODE_CHANGE(,r12);
    asm("ldmia r1, {r0-r3,r12,pc}^ "); // restore r0-r3,r12 and return from exception

    // get here if exception occurred in mode other than usr or svc
    // we are in mode_abt or mode_und with IRQs disabled
    asm("fatal_exception_mode: ");
    asm("ldr r2, __TheScheduler ");
    asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(TScheduler,iMonitorExceptionHandler));
    asm("cmp lr, #0 ");
    __JUMP(ne,lr); // if crash debugger running, let it handle exception

    // get here if mode_svc stack has overflowed
    // we are in mode_svc with interrupts enabled and the kernel locked
    // R0=original CPSR R1=exc code R12=mode of exception
    asm("fatal_exception_stack: ");
    asm("orr r3, r12, #0xC0 ");
    asm("msr cpsr, r3 "); // back to exception mode, all interrupts off
    asm("mov r2, r0 ");
    asm("ldr r0, __TheScheduler ");
    asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TScheduler,i_Regs)); // pass in address of stored registers
    asm("bl " CSM_ZN3Arm9SaveStateER14SFullArmRegSet );
    asm("str r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iExcCode));
    asm("str r2, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iFlags));
    asm("ldmia sp!, {r3-r7} "); // get original R0-R4
    asm("stmia r0, {r1-r5} "); // save original R0-R4
    asm("ldmia sp!, {r6,r7} "); // get original R12 and aborted instruction address
    asm("str r6, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR12));
    asm("str r7, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR15));
    asm("mov r1, #13 "); // r1 = regnum
    asm("mrs r2, cpsr "); // r2 = mode
    asm("mov r4, r0 ");
    asm("bl " CSM_ZN3Arm3RegER14SFullArmRegSetim ); // r0 = pointer to exception mode R13
    asm("str sp, [r0] "); // save correct original value for exception mode R13

    // call the exception fault dispatcher
    asm("mov r0, #0 ");
    asm("b ExcFault ");
    }

extern "C" __NAKED__ void __ArmVectorAbortPrefetch()
//
// Prefetch abort
//
    {
    asm("sub lr, lr, #4"); // lr now points to instruction whose prefetch was aborted
    asm("stmfd sp!, {r0-r4,r12,lr}"); // save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
    CLREX // reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
    STREX(12,0,13); // dummy STREX to reset exclusivity monitor
#endif
    asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
    asm("b handle_exception ");
    }

extern "C" __NAKED__ void __ArmVectorUndef()
//
// Undefined instruction exception
//
    {
    asm("sub lr, lr, #4"); // lr now points to undefined instruction
    asm("stmfd sp!, {r0-r4,r12,lr}"); // save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
    CLREX // reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
    STREX(12,0,13); // dummy STREX to reset exclusivity monitor
#endif
    asm("mrs r0, spsr "); // r0=CPSR at time of exception
    asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
    asm("tst r0, #0x20 "); // exception in THUMB mode?
    asm("addne lr, lr, #2 "); // if so, correct saved return address
    asm("strne lr, [sp, #24] ");
    asm("b handle_exception2 ");
    }