diff -r 000000000000 -r a41df078684a kernel/eka/nkern/arm/vectors.cia
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/nkern/arm/vectors.cia	Mon Oct 19 15:55:17 2009 +0100
@@ -0,0 +1,808 @@
+// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\nkern\arm\vectors.cia
+//
+//
+
+#define __INCLUDE_NTHREADBASE_DEFINES__
+#include <e32cia.h>
+#include <arm.h>
+
+void FastMutexNestAttempt();
+void FastMutexSignalError();
+extern "C" void ExcFault(TAny*);
+
+#ifdef __CPU_HAS_MMU
+#define __USE_CP15_FAULT_INFO__
+#endif
+
+#ifdef _DEBUG
+#define __CHECK_LOCK_STATE__
+#endif
+
+//#define __FAULT_ON_FIQ__
+
+#ifdef __CHECK_LOCK_STATE__
+// Check that the kernel is unlocked, no fast mutexes are held and that the thread is not in a
+// critical section. Called when returning to user mode.
+__NAKED__ void CheckLockState()
+	{
+	asm("stmfd sp!, {r14}");
+	asm("ldr r12, __TheScheduler ");
+	asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
+	asm("cmp r14, #0 ");
+	asm("movne r12, #0xdd000000 ");
+	asm("strne r12, [r12, #1] ");
+	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
+	asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
+	asm("cmp r14, #0 ");
+	__CPOPRET(eq, "");
+	asm("badLockState: ");
+	asm("mov r12, #0xd7 ");
+	asm("msr cpsr, r12 ");
+	asm("mov r12, #0xdd000000 ");
+	asm("str r12, [r12, #3] ");
+	}
+#endif
+
+__ASSERT_COMPILE(EUserModeCallbackRun == 0);
+
+__NAKED__ void CallUserModeCallbacks()
+	{
+	// called with interrupts disabled
+	// preserves r0 and r1 in addition to the usual registers
+	// leaves current thread in r2
+	// the vast majority of times this is called with zero or one callback pending
+
+	asm(".global callUserModeCallbacks ");
+	asm("callUserModeCallbacks: ");
+
+	asm("ldr ip, __TheScheduler ");
+	asm("ldr r2, [ip, #%a0]" : : "i" _FOFF(TScheduler, iCurrentThread));
+
+	asm("callUserModeCallbacks2: ");
+
+#ifdef __USER_MEMORY_GUARDS_ENABLED__
+	asm("mrc p15, 0, r12, c3, c0, 0 ");
+	asm("tst r12, #0xc0000000 ");
+	asm("cdpne p15, 0, c0, c0, c0, 0 ");
+#endif
+#ifdef __CHECK_LOCK_STATE__
+	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread,iCsCount));
+	asm("cmp ip, #0 ");
+	asm("bne badLockState ");
+#endif
+
+	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserModeCallbacks));
+	asm("teq ip, #0");
+	asm("bne 1f");
+	__JUMP(,lr);
+
+	asm("1: ");
+	asm("stmfd sp!, {r0-r2, r4-r11, lr}");
+	asm("movs r4, r3");
+	// if r3 != 0 it is the user context type to set the thread to
+	asm("strneb r3, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));
+
+	// Remove first callback and enter critical section - we can just set iCsCount to 1 as we are
+	// guaranteed not to be in a critical section already
+	asm("ldmia ip, {r1, r3} ");		// HARDCODED: TUserModeCallback layout
+	asm("mov r0, #1");
+	asm("str r0, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));
+	asm("str r1, [r2, #%a0]" : : "i" _FOFF(NThread,iUserModeCallbacks));
+
+	// Re-enable interrupts and call callback
+	SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
+	asm("mov r1, #%a0 " : : "i" ((TInt)KUserModeCallbackUnqueued));
+	asm("str r1, [ip, #%a0]" : : "i" _FOFF(TUserModeCallback, iNext));
+	asm("mov r0, ip");
+	asm("mov r1, #0 ");			// 0 == EUserModeCallbackRun
+	__JUMPL(3);
+
+	SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);
+
+	asm("movs r3, r4");
+	// Leave critical section, avoid calling NKern::ThreadLeaveCS unless we have to
+	asm("ldmfd sp!, {r0-r2, r4-r11, lr}");
+	// reset user context type to undefined if r3 != 0
+	asm("mov ip, #%a0" : : "i" (NThread::EContextUndefined));
+	asm("strneb ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));
+	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsFunction));
+	asm("teq ip, #0");
+	asm("streq ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));
+	asm("beq callUserModeCallbacks2 ");
+
+	asm("leaveCS:");
+	asm("sub sp, sp, #48 ");
+	SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
+	asm("bl " CSM_ZN5NKern13ThreadLeaveCSEv);
+	SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);
+	asm("ldmfd sp!, {r0-r2, r4-r11, lr}");
+	asm("b callUserModeCallbacks2 ");
+	}
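+
+// A note on the callback dequeue above: "ldmia ip, {r1, r3}" (flagged
+// HARDCODED: TUserModeCallback layout) assumes the first two members of
+// TUserModeCallback are the queue link followed by the function pointer.
+// A minimal sketch of the assumed layout (the real class lives in the
+// nkern headers):
+//
+//	class TUserModeCallback
+//		{
+//	public:
+//		TUserModeCallback* volatile iNext;	// -> r1; reset to KUserModeCallbackUnqueued
+//		TUserModeCallbackFunc iFunc;		// -> r3; invoked via __JUMPL(3)
+//		};
+//
+// __ASSERT_COMPILE(EUserModeCallbackRun == 0) lets the "mov r1, #0" before
+// __JUMPL(3) pass EUserModeCallbackRun as the reason code without a load.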
+
+/***************************************************************************
+ * SWI Handler
+ ***************************************************************************/
+
+extern "C" __NAKED__ void __ArmVectorSwi()
+	{
+	// IRQs disabled, FIQs enabled here
+	asm("ldr r12, [lr, #-4] ");		// get SWI opcode
+	asm("stmfd sp!, {r11, lr} ");		// save return address, r11 for 8 byte align
+	USER_MEMORY_GUARD_ON_IF_MODE_USR(r11);
+	asm("ldr r11, __TheScheduler ");
+	asm("adr lr, fast_swi_exit ");
+	asm("movs r12, r12, lsl #9 ");		// 512*SWI number into r12
+	asm("bcc slow_swi ");			// bit 23=0 for slow/unprot
+	asm("ldr r1, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
+	asm("beq wait_for_any_request ");	// special case for Exec::WaitForAnyRequest
+#ifdef __CPU_ARM_HAS_CPS
+	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
+	CPSIDIF;				// all interrupts off
+	asm("ldr r3, [r2], r12, lsr #7 ");	// r3=limit, r2->dispatch table entry
+	asm("ldr r2, [r2] ");			// r2->kernel function
+	asm("cmp r3, r12, lsr #9 ");		// r3-SWI number
+	__JUMP(hi, r2);				// if SWI number valid, call kernel function
+#else
+	SET_INTS(r2, MODE_SVC, INTS_ALL_OFF);
+	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
+	asm("ldr r3, [r2], r12, lsr #7 ");	// r3=limit, r2->dispatch table entry
+	asm("cmp r3, r12, lsr #9 ");		// r3-SWI number
+	asm("ldrhi pc, [r2] ");			// if SWI number valid, call kernel function
+#endif
+	asm("mvn r12, #0 ");			// put invalid SWI number into r12
+	asm("b slow_swi ");			// go through slow SWI routine to call invalid SWI handler
+
+	asm("fast_swi_exit: ");
+#ifdef __CHECK_LOCK_STATE__
+	asm("mrs r12, spsr ");
+	asm("tst r12, #0x0f ");
+	asm("bleq " CSM_Z14CheckLockStatev);
+#endif
+	USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11);
+	ERRATUM_353494_MODE_CHANGE(,r11);
+	asm("ldmfd sp!, {r11, pc}^ ");		// return and restore cpsr
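+
+// For reference: after "movs r12, r12, lsl #9" above, r12 holds 512*(SWI
+// number), the fast-exec flag (opcode bit 23) has moved into the carry
+// flag, and Z is set when the call number is 0, which on the fast-exec
+// path means Exec::WaitForAnyRequest. The fast dispatch then indexes
+// iFastExecTable, whose assumed shape is roughly this sketch (not the
+// authoritative declaration):
+//
+//	struct SFastExecTable
+//		{
+//		TInt iFastExecCount;		// limit, loaded into r3
+//		TLinAddr iFunction[1];		// one word per call, starting at call
+//		};				// number 1 (0 is WaitForAnyRequest)
+//
+// so "ldr r3, [r2], r12, lsr #7" fetches the limit and advances r2 by
+// 4*(SWI number) in one instruction, and "cmp r3, r12, lsr #9" checks the
+// original call number against the limit.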
r6 + asm("ldrls pc, [r4, #-8] "); // if SWI number invalid, call invalid handler + asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask)); // extra arguments needed? + asm("addne r2, sp, #4 "); // if so, point r2 at saved registers on stack + asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagClaim)); // claim system lock? + asm("beq slow_swi_no_wait "); // skip if not + + SET_INTS_2(r7, MODE_SVC, INTS_ALL_OFF); // interrupts off +#ifdef _DEBUG + asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); + asm("cmp r12, #0 "); + asm("bne " CSM_Z20FastMutexNestAttemptv); // debug check that current thread doesn't already hold a fast mutex +#endif + asm("ldr r12, [r10, #%a0]!" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // r12=iLock.iHoldingThread + SET_INTS_1(r7, MODE_SVC, INTS_ALL_ON); + asm("cmp r12, #0 "); // is system lock already held? + asm("bne ss_fast_mutex_held "); // branch if it is + asm("ss_fast_mutex_obtained: "); + asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=&iLock + asm("str r9, [r10], #-%a0" : : "i" _FOFF(TScheduler,iLock)); // iLock.iHoldingThread=current thread, r10->scheduler +#ifdef BTRACE_FAST_MUTEX + asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter))); + asm("cmp r12, #0"); + asm("bne syslock_trace_wait"); + asm("syslock_trace_wait_done:"); +#endif + SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON); // all interrupts on + + asm("slow_swi_no_wait: "); + asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess)); // preprocess (handle lookup)? can use r4, r7, r8, r12, r0 + asm("mov lr, pc "); + asm("ldrne pc, [r4, #-4] "); // call preprocess handler if required + asm("mov lr, pc "); + __JUMP(,r6); // call exec function, preserve r5,r11 if release syslock not required + // preserve r5,r9,r10,r11 if release required + asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagRelease)); // release system lock? 
+ asm("beq slow_swi_exit "); // skip if not + + SET_INTS(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts +#ifdef _DEBUG + asm("add r8, r10, #%a0" : : "i" _FOFF(TScheduler,iLock)); + asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); + asm("cmp r12, r8 "); + asm("bne " CSM_Z20FastMutexSignalErrorv); // debug check that current thread holds system lock +#endif +#ifdef BTRACE_FAST_MUTEX + asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter))); + asm("cmp r12, #0"); + asm("bne syslock_trace_signal"); + asm("syslock_trace_signal_done:"); +#endif + asm("mov r12, #0 "); + asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // iLock.iHoldingThread=NULL + asm("str r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=NULL + asm("ldr r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting)); // r3=iLock.iWaiting + asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting)); // iLock.iWaiting=0 + SET_INTS_1(r8, MODE_SVC, INTS_ALL_ON); + asm("cmp r3, #0 "); // check waiting flag + asm("bne ss_signal_check "); // branch if set + asm("ss_signal_done: "); + SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON); // otherwise reenable interrupts + + asm("slow_swi_exit: "); +#ifdef __CHECK_LOCK_STATE__ + asm("tst r11, #0x0f "); + asm("bleq " CSM_Z14CheckLockStatev); +#endif + SET_INTS(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts + asm("msr spsr, r11 "); // restore spsr_svc + asm("tst r11, #0x0f "); + asm("mov r3, #0 "); +#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED) + asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround + // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr +#endif + asm("bleq callUserModeCallbacks "); // call user-mode callbacks + USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11); + ERRATUM_353494_MODE_CHANGE(,r11); + asm("ldmfd sp!, {r3-r11,pc}^ "); // return from EXEC function + + // Come here if we need to wait for the system lock + // r9->current thread, r10=&iLock, r12=iLock.iHoldingThread + asm("ss_fast_mutex_held: "); + asm("mov r8, #1 "); + asm("str r8, [r10, #%a0]" : : "i" (_FOFF(TScheduler,iKernCSLocked)-_FOFF(TScheduler,iLock))); // lock the kernel + SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON); // enable interrupts + asm("str r8, [r10, #4] "); // iWaiting=1 + asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // current thread->iWaitFastMutex=&iLock + asm("stmfd sp!, {r0-r3} "); // save exec call arguments + asm("mov r0, r12 "); // parameter for YieldTo + ASM_DEBUG1(NKFMWaitYield,r0); + asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase); // yield to the mutex holding thread + // will not return until the mutex is free + // on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled + asm("str r1, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex)); // iWaitFastMutex=NULL + asm("ldmfd sp!, {r0-r3} "); // retrieve exec call arguments + asm("b ss_fast_mutex_obtained "); // branch back to main code path + + // Come here if we need to reschedule after releasing the system lock + // kernel unlocked, interrupts enabled, r0 contains return value from Exec call + // r9->current thread, r10=&TheScheduler, r3=1, r8=0x13 + asm("ss_signal_check: "); + asm("str r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // lock the kernel (assumes iWaiting always 0 or 1) + SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON); // reenable interrupts + asm("strb r3, [r10, #%a0]" : : "i" 
+
+#ifdef BTRACE_FAST_MUTEX
+	asm("syslock_trace_wait:");
+	asm("ldr r12, [sp,#9*4]");		// r12 = return address from SWI
+	asm("mov r8, r3");			// save r3
+	asm("stmdb sp!,{r0-r2,r12}");		// 4th item on stack is PC value for trace
+	asm("ldr r0, fmwait_trace_header");
+	asm("mov r2, r9");			// current thread
+	asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
+	asm("mov lr, pc");
+	asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
+	asm("ldmia sp!,{r0-r2,r12}");
+	asm("mov r3, r8");			// restore r3
+	asm("b syslock_trace_wait_done");
+
+	asm("syslock_trace_signal:");
+	asm("ldr r12, [sp,#9*4]");		// r12 = return address from SWI
+	asm("stmdb sp!,{r0-r2,r12}");		// 4th item on stack is PC value for trace
+	asm("ldr r0, fmsignal_trace_header");
+	asm("mov r2, r9");			// current thread
+	asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
+	asm("mov lr, pc");
+	asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
+	asm("ldmia sp!,{r0-r2,r12}");
+	asm("b syslock_trace_signal_done");
+
+	asm("fmsignal_trace_header:");
+	asm(".word %a0" : : "i" ((TInt)(16<