// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32/include/nkernsmp/arm/entry.h
//
//

#include <arm_gic.h>
#include <arm_tmr.h>
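
// This file contains the exception vector table and the low-level FIQ/IRQ entry
// code for the SMP ARM nanokernel. The naked function below must be placed at
// the very start of the kernel image so that its first instructions form the
// ARM exception vectors.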
|

extern "C" {

extern void __ArmVectorReset();
extern void __ArmVectorUndef();
extern void __ArmVectorSwi();
extern void __ArmVectorAbortPrefetch();
extern void __ArmVectorAbortData();
extern void __ArmVectorReserved();
extern void __ArmVectorIrq();
extern void __ArmVectorFiq();

#define __DECLARE_UNDEFINED_INSTRUCTION_HANDLER    asm(".word __ArmVectorUndef ")
#define __DECLARE_PREFETCH_ABORT_HANDLER           asm(".word __ArmVectorAbortPrefetch ")
#define __DECLARE_DATA_ABORT_HANDLER               asm(".word __ArmVectorAbortData ")

#ifdef BTRACE_CPU_USAGE
extern void btrace_irq_entry(TInt);
extern void btrace_fiq_entry();
#endif

extern void handle_crash_ipi();

#ifdef _DEBUG
extern void __DebugMsgIrq(TUint aIrqNumber);
#endif

/* NOTE: We must ensure that this code goes at the beginning of the kernel image.
*/
__NAKED__ void __this_must_go_at_the_beginning_of_the_kernel_image()
    {
    asm("ldr pc, __reset_vector ");     // 00 = Reset vector
    asm("ldr pc, __undef_vector ");     // 04 = Undefined instruction vector
    asm("ldr pc, __swi_vector ");       // 08 = SWI vector
    asm("ldr pc, __pabt_vector ");      // 0C = Prefetch abort vector
    asm("ldr pc, __dabt_vector ");      // 10 = Data abort vector
    asm("ldr pc, __unused_vector ");    // 14 = unused
    asm("b HandleIrq ");                // 18 = IRQ vector
                                        // 1C = FIQ vector, code in situ
    /*** FIQ entry point ******************************************************/
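    // The FIQ vector is the last table entry, so the FIQ preamble sits here in
    // situ and needs no branch. It fetches the handler registered in
    // SArmInterruptInfo::iFiqHandler, pushes the adjusted return address, and
    // jumps to the handler with LR set to __ArmVectorFiq so that the handler
    // returns through the kernel's FIQ postamble.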
|
    asm("ldr r12, __ArmInterrupt ");
#ifdef BTRACE_CPU_USAGE
    asm("ldr r11, __BTraceCpuUsageFilter ");
#endif
    asm("sub lr, lr, #4 ");
    asm("ldr r10, [r12,#%a0]" : : "i" _FOFF(SArmInterruptInfo,iFiqHandler));    // r10 points to FIQ handler
    asm("str lr, [sp, #-4]! ");
    // we assume FIQ handler preserves r0-r7 but not r8-r12
    // hence must be assembler, so stack misalignment OK
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
    CLREX
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
    STREX(8,14,13);                     // dummy STREX to reset exclusivity monitor
#endif
#ifdef __USER_MEMORY_GUARDS_ENABLED__
    USER_MEMORY_GUARD_ON(,lr,r8);
    asm("str lr, [sp, #-4]! ");
#endif
#ifdef BTRACE_CPU_USAGE
    asm("ldrb r8, [r11] ");
    asm("ldr lr, _ArmVectorFiq ");
    asm("cmp r8, #0 ");
    asm("bne btrace_fiq");
    __JUMP(, r10);                      // jump to FIQ handler

    asm("btrace_fiq: ");                // call trace handler before fiq handler...
    asm("stmfd sp!, {r0-r3,r12,lr} ");
    asm("adr lr, btrace_fiq_return ");
    asm("ldr pc, __btrace_fiq_entry ");
    asm("btrace_fiq_return: ");
    asm("ldmfd sp!, {r0-r3,r12,lr} ");
    __JUMP(, r10);                      // jump to FIQ handler
#endif
    asm("ldr lr, _ArmVectorFiq ");
    __JUMP(,r10);                       // jump to FIQ handler


    /*** Nested IRQ register save *********************************************/
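    // If the interrupted mode was mode_sys we are already inside interrupt
    // handling, so the interrupted context is saved on the per-CPU interrupt
    // stack (SRS into mode_sys) rather than on a thread's supervisor stack,
    // and execution rejoins the common dispatch path at nested_irq_rejoin.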
|
    asm("nested_irq: ");
    SRSDBW(MODE_SYS);                   // save return address and return CPSR to interrupt stack
    CPSCHM(MODE_SYS);                   // mode_sys, IRQs off
    asm("stmfd sp!, {r0-r12,lr} ");     // save R0-R12,R14 from system mode
    GET_RWNO_TID(,r4);
    asm("b nested_irq_rejoin ");

    /*** IRQ entry point ******************************************************/
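    // First-level (non-nested) IRQs save the interrupted context as an
    // SThreadExcStack frame on the mode_svc stack, then switch to mode_sys and
    // the per-CPU IRQ stack (TSubScheduler::i_IrqStackTop) with FIQs re-enabled
    // before entering the dispatch loop.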
|
    asm("HandleIrq: ");
    asm("mrs r13, spsr ");
    asm("sub lr, lr, #4 ");
    asm("and r13, r13, #0x1f ");
    asm("cmp r13, #0x1f ");             // interrupted mode_sys?
    asm("beq nested_irq ");             // yes -> nested interrupt
    SRSDBW(MODE_SVC);                   // save return address and return CPSR to supervisor stack
    __ASM_CLI_MODE(MODE_SVC);           // mode_svc, IRQs and FIQs off
    asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadExcStack,iR15));
    asm("stmia sp, {r0-r14}^ ");        // save R0-R12, R13_usr, R14_usr
    asm("mov r1, #%a0" : : "i" ((TInt)SThreadExcStack::EIrq));
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
    CLREX
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
    STREX(12, 0, 13);                   // dummy STREX to reset exclusivity monitor
#endif
    GET_RWNO_TID(,r4);
    asm("mov r5, sp ");
    asm("str r1, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iExcCode));          // word describing exception type
    __ASM_STI2_MODE(MODE_SYS);          // mode_sys, IRQs off, FIQs on
    asm("ldr sp, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqStackTop));
    USER_MEMORY_GUARD_ON(,r8,r0);       // r8 = original DACR if user memory guards in use

    asm("nested_irq_rejoin: ");
    asm("ldr r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
    asm("ldr r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
    asm("ldr r12, __ArmInterrupt ");
    asm("ldr r10, _ArmVectorIrq ");
    asm("add r0, r0, #1 ");
    asm("add r7, r7, #1 ");
    __DATA_MEMORY_BARRIER_Z__(r2);      // ensure memory accesses in interrupted code are observed before
                                        // the writes to i_IrqCount, i_IrqNestCount
    asm("str r0, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));         // increment i_IrqCount
    asm("ldr r11, [r12,#%a0]" : : "i" _FOFF(SArmInterruptInfo,iIrqHandler));    // address of IRQ handler
    asm("ldr r6, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicCpuIfcAddr));
    asm("str r7, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));     // increment i_IrqNestCount

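    // Dispatch loop: read the GIC CPU interface acknowledge register to obtain
    // the ID of the highest priority pending interrupt and keep dispatching
    // until the spurious ID (E_GicIntId_Spurious) indicates nothing is pending.
    // IDs 32 and above go to the registered IRQ dispatcher; IDs 0-31 are
    // handled below as IPIs or the local timer interrupt.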
|
    asm("1: ");
#ifdef BTRACE_CPU_USAGE
    asm("ldr r2, __BTraceCpuUsageFilter ");
#endif
    asm("mov r1, #%a0" : : "i" ((TInt)E_GicIntId_Spurious+1));
    asm("ldr r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iAck));   // r0 = number of interrupt to service
#ifdef BTRACE_CPU_USAGE
    asm("ldrb r2, [r2] ");
#endif
    asm("sub r1, r1, #1 ");
    asm("cmp r0, r1 ");                 // any more interrupts pending?
    asm("beq 2f ");                     // if not, branch out
#ifdef BTRACE_CPU_USAGE
    asm("cmp r2, #0 ");
    asm("beq 9f ");
    asm("stmfd sp!, {r0-r3} ");
    asm("adr lr, btrace_irq_return ");
    asm("ldr pc, __btrace_irq_entry ");
    asm("btrace_irq_return: ");
    asm("ldmfd sp!, {r0-r3} ");
    asm("9: ");
#endif  // BTRACE_CPU_USAGE
    ASM_DEBUG1(_longjump_Irq,r0);
    asm("adr lr, 1b ");
    asm("tst r0, #0x3e0 ");             // check for interrupt numbers 0-31
    asm("beq 3f ");                     // branch out if so
    __JUMP(,r11);                       // jump to dispatcher, R0 = interrupt number, return to 1:
                                        // dispatcher acknowledges interrupt

    // No more interrupts pending - jump to postamble in the kernel
    // R4->TSubScheduler at this point, R5->saved registers on SVC stack if not nested IRQ
    // R6->GIC CPU interface
    asm("2: ");
    __JUMP(,r10);

    // Kernel IPI
    asm("3: ");
    asm("and r2, r0, #31 ");            // r2 = interrupt number 0...31
    asm("cmp r2, #%a0" : : "i" ((TInt)TIMESLICE_VECTOR));
    asm("beq do_timeslice_irq ");
    asm("cmp r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR));
    asm("beq do_resched_ipi ");
    asm("cmp r2, #%a0" : : "i" ((TInt)GENERIC_IPI_VECTOR));
    asm("beq do_generic_ipi ");
    asm("cmp r2, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
    asm("beq do_transferred_ipi ");
    asm("cmp r2, #15 ");
    __JUMP(hi, r11);                    // if >15 but not TIMESLICE_VECTOR, call dispatcher
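    // GIC IDs 0-15 are software-generated interrupts (used as IPIs) and IDs
    // 16-31 are private peripheral interrupts such as the local timer; any SGI
    // not matched above is assumed to be the crash IPI.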
|

    // else assume CRASH_IPI
    asm("str r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));   // acknowledge interrupt
    __DATA_SYNC_BARRIER_Z__(r1);
    asm("ldr r1, __HandleCrashIPI ");
    __JUMP(, r1);                       // CRASH IPI, so crash

    // TIMESLICE, RESCHED or TRANSFERRED
    asm("do_timeslice_irq: ");
    asm("ldr r2, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, i_LocalTimerAddr));
    asm("mov r1, #1 ");
    asm("str r1, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer, iTimerIntStatus));    // clear timer event flag
    asm("do_resched_ipi: ");
    asm("mov r1, #1 ");
    asm("strb r1, [r4, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
    asm("do_transferred_ipi: ");
    asm("str r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));   // acknowledge interrupt
    __DATA_SYNC_BARRIER_Z__(r1);        // ensure writes to i_IrqCount, i_IrqNestCount, iRescheduleNeededFlag complete before SEV
                                        // also ensure EOI is written before we return from the interrupt
    ARM_SEV;                            // kick any CPUs waiting for us to enter the ISR
    asm("b 1b ");

    // GENERIC_IPI
    asm("do_generic_ipi: ");
    asm("ldr r2, _GenericIPIIsr ");
    asm("str r0, [r6, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));   // acknowledge interrupt
    asm("mov r0, r4 ");                 // r0->SubScheduler
    __DATA_SYNC_BARRIER_Z__(r1);
    __JUMP(, r2);

    asm("__DebugMsg_longjump_Irq: ");
    asm("ldr pc, _dmIrq ");

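    // Literal pool: these .word constants hold the handler and data addresses
    // referenced by the PC-relative loads in the vector table and entry code above.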
|
    asm("__reset_vector:");
    asm(".word __ArmVectorReset ");
    asm("__undef_vector:");
    __DECLARE_UNDEFINED_INSTRUCTION_HANDLER;
    asm("__swi_vector:");
    asm(".word __ArmVectorSwi ");
    asm("__pabt_vector:");
    __DECLARE_PREFETCH_ABORT_HANDLER;
    asm("__dabt_vector:");
    __DECLARE_DATA_ABORT_HANDLER;
    asm("__unused_vector:");
    asm(".word __ArmVectorReserved ");

    asm("__ArmInterrupt: ");
    asm(".word ArmInterruptInfo ");
    asm("_ArmVectorIrq: ");
    asm(".word __ArmVectorIrq ");
    asm("_GenericIPIIsr: ");
    asm(".word generic_ipi_isr ");
    asm("_ArmVectorFiq: ");
    asm(".word __ArmVectorFiq ");
    asm("__HandleCrashIPI: ");
    asm(".word handle_crash_ipi ");
#ifdef BTRACE_CPU_USAGE
    asm("__BTraceCpuUsageFilter: ");
    asm(".word %a0" : : "i" ((TInt)&BTraceData.iFilter[BTrace::ECpuUsage]));
    asm("__btrace_irq_entry: ");
    asm(".word btrace_irq_entry ");
    asm("__btrace_fiq_entry: ");
    asm(".word btrace_fiq_entry ");
#endif
    asm("_dmIrq: ");
    asm(".word __DebugMsgIrq ");
    }
}