
extern "C" __NAKED__ void __ArmVectorIrq()
	{
	// FIQs enabled here but not IRQs
	asm("ldr r1, __TheScheduler ");
	asm("mrs r0, spsr ");					// check interrupted mode
	asm("add r12, sp, #%a0 " : : "i" (4*(6+USER_MEMORY_GUARD_SAVE_WORDS)));	// r12=sp_irq+6 or 8 words
	asm("and r2, r0, #0x1f ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// r3=KernCSLocked
	asm("cmp r2, #0x10 ");					// check for mode_usr
	asm("cmpne r2, #0x13 ");				// or mode_svc
	asm("cmpeq r3, #0 ");					// and then check if kernel locked
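	// After this chain Z is set only if we interrupted mode_usr or mode_svc
	// with the kernel unlocked; any other combination takes the branch below.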
	asm("bne IrqExit0 ");					// if wrong mode or locked, return immediately
	SET_INTS(r2, MODE_IRQ, INTS_ALL_OFF);	// disable FIQs before we check for reschedule
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// r2=DfcPendingFlag/RescheduleNeededFlag
	asm("add r3, r3, #1 ");
	SET_MODE_1(lr, MODE_SVC, INTS_ALL_ON);
	asm("cmp r2, #0 ");						// check if reschedule needed
	asm("beq IrqExit0 ");					// if not, return immediately
	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	SET_MODE_2(lr, MODE_SVC, INTS_ALL_ON);	// mode_svc, interrupts back on
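
	// We are now in mode_svc with the kernel locked. Copy the frame the IRQ
	// preamble pushed on the mode_irq stack over to the mode_svc stack, so the
	// mode_irq stack can be rebalanced before the reschedule.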
	asm("ldmdb r12!, {r1-r3} ");			// move saved registers (r0-r3,r12,pc) over to mode_svc stack
	asm("stmfd sp!, {r1-r3} ");
	asm("ldmdb r12!, {r1-r3} ");
	asm("stmfd sp!, {r1-r3} ");
	asm("stmfd sp!, {r0,lr} ");				// store interrupted cpsr and lr_svc on current mode_svc stack
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldmdb r12, {r1-r2} ");
	asm("stmfd sp!, {r1-r2} ");				// move user guard over to mode_svc stack
#endif
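	// The mode_svc stack now holds, lowest address first (assuming
	// USER_MEMORY_GUARD_SAVE_WORDS is 2 with user memory guards enabled, 0 otherwise):
	//   [saved DACR, spare,] interrupted cpsr, lr_svc, r0-r3, r12, return address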

	SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
	SET_MODE(lr, MODE_IRQ, INTS_IRQ_OFF);	// mode_irq, IRQs off
	asm("add sp, r12, #24 ");				// restore mode_irq stack balance
	SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON);	// back to mode_svc, IRQs on

	// reschedule - this also switches context if necessary
	// enter this function in mode_svc, interrupts on, kernel locked
	// exit this function in mode_svc, all interrupts off, kernel unlocked
	asm("irq_do_resched: ");
	asm("bl " CSM_ZN10TScheduler10RescheduleEv);
	asm(".global irq_resched_return ");
	asm("irq_resched_return: ");
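	// The FIQ postamble below also resumes here after its own call to
	// TScheduler::Reschedule (it points lr at irq_resched_return before branching).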

	SET_MODE(r2, MODE_SVC, INTS_ALL_OFF);	// all interrupts off
	asm("ldr r1, [sp, #%a0] " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));	// get interrupted cpsr, don't unbalance stack
#ifdef __CHECK_LOCK_STATE__
	asm("mov r2, r12 ");
	asm("tst r1, #0x0f ");
	asm("bleq " CSM_Z14CheckLockStatev);
	asm("mov r12, r2 ");
#endif

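	// The bottom four mode bits of the interrupted cpsr are zero only for mode_usr
	// (0x10), so the user-mode callbacks below run only if we interrupted user-side code.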
	asm("tst r1, #0x0f ");
	asm("mov r3, #%a0 " : : "i" (NThread::EContextUserIntrCallback));
	asm("bleq callUserModeCallbacks ");		// call user-mode callbacks

#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r1, [sp], #%a0 " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));	// pop saved DACR, adjust sp
	USER_MEMORY_GUARD_RESTORE(r1,lr);
#endif
	asm("ldmfd sp!, {r1, lr} ");			// restore lr_svc
	asm("add sp, sp, #24 ");				// restore mode_svc stack balance
	asm("mov r12, sp ");					// r12=address of remaining saved registers

	SET_MODE(r2, MODE_IRQ, INTS_ALL_OFF);	// back into mode_irq, all interrupts off

	asm("msr spsr, r1 ");					// interrupted cpsr into spsr_irq
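	// The ldmdb below reloads r0-r3, r12 and pc from the mode_svc frame; with pc
	// in the register list and the '^' qualifier, spsr_irq is copied back into
	// cpsr as well, resuming the interrupted code.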
	ERRATUM_353494_MODE_CHANGE(,r12);
	asm("ldmdb r12, {r0-r3,r12,pc}^ ");		// return from interrupt

	asm("IrqExit0: ");
#ifdef __CHECK_LOCK_STATE__
	asm("tst r0, #0x0f ");
	asm("bleq " CSM_Z14CheckLockStatev);
#endif

	asm("IrqExit1: ");						// entry point for __ArmVectorIrqPostambleNoResched()
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr lr, [sp], #%a0 " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));	// pop saved DACR, adjust sp
	USER_MEMORY_GUARD_RESTORE(lr,r12);
#endif

#ifdef BTRACE_CPU_USAGE
	asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
	asm("mov r0, #%a0" : : "i" ((TInt)4 ) );
	asm("add r0, r0, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8)) );
	asm("cmp r2, #0");
	asm("movne lr, pc");
	asm("ldrne pc, [r1,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
#endif
	ERRATUM_353494_MODE_CHANGE(,r12);
	asm("ldmfd sp!, {r0-r3,r12,pc}^ ");		// return from interrupt
	}

/***************************************************************************
 * IRQ Postamble which will not reschedule (can be returned to by co-resident OS).
 * This routine is called after the IRQ has been dispatched
#endif
	// IRQs and FIQs disabled here
	// r0-r7 are unaltered from when FIQ occurred
	asm("ldr r9, __TheScheduler ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r12, [sp], #4 ");				// pop saved DACR
#endif
	asm("mrs r8, spsr ");					// check interrupted mode
	asm("and r10, r8, #0x1f ");
	asm("cmp r10, #0x10 ");					// check for mode_usr
	asm("ldr r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmpne r10, #0x13 ");				// or mode_svc
	asm("ldreq r10, [r9, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
	asm("cmpeq r11, #0 ");					// and check if kernel locked
	asm("bne FiqExit0 ");					// if wrong mode or kernel locked, return immediately
	asm("cmp r10, #0 ");					// check if reschedule needed
	asm("beq FiqExit0 ");					// if not, return from interrupt

	// we interrupted mode_usr or mode_svc, kernel unlocked, reschedule needed
	asm("add r11, r11, #1 ");
	asm("str r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	asm("stmfd sp!, {r1-r3} ");				// save interrupted r1-r3 on FIQ stack
	asm("mov r1, r8 ");						// r1=interrupted cpsr
	asm("mov r3, sp ");						// r3 points to saved registers
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("mov r2, r12 ");					// saved DACR into R2
#endif
	SET_MODE(lr, MODE_SVC, INTS_ALL_ON);	// switch to mode_svc, IRQs and FIQs back on
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("str r2, [sp, #%a0]! " : : "i" (-4*(8+USER_MEMORY_GUARD_SAVE_WORDS)));	// save DACR and leave room for spare, cpsr, lr_svc, r0-r3, r12, pc
#else
	asm("sub sp, sp, #32 ");				// make room for saved registers on mode_svc stack
#endif
	asm("ldr r2, [r3, #12] ");				// r2=return address
	asm("str r12, [sp, #%a0] " : : "i" (4*(6+USER_MEMORY_GUARD_SAVE_WORDS)));	// save r12 on mode_svc stack
	asm("str r2, [sp, #%a0] " : : "i" (4*(7+USER_MEMORY_GUARD_SAVE_WORDS)));	// save return address on mode_svc stack
	asm("add r12, sp, #%a0 " : : "i" (4*(USER_MEMORY_GUARD_SAVE_WORDS)));

	asm("stmia r12!, {r1,lr} ");			// save interrupted cpsr and lr_svc
	asm("ldmia r3, {r1,r2,lr} ");			// retrieve original r1-r3 from mode_fiq stack
	asm("stmia r12, {r0-r2,lr} ");			// save original r0-r3 - saved register order is now cpsr,lr_svc,r0-r3,r12,pc
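	// The mode_svc frame now mirrors the one built by __ArmVectorIrq above
	// ([saved DACR, spare,] cpsr, lr_svc, r0-r3, r12, pc), so the shared
	// irq_resched_return path can unwind either of them.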
	SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
	SET_MODE(lr, MODE_FIQ, INTS_ALL_OFF);	// mode_fiq, IRQs and FIQs off
	asm("add sp, r3, #16 ");				// restore mode_fiq stack balance
	SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON);	// back to mode_svc, IRQs on
	asm("adr lr, irq_resched_return ");
	asm("b " CSM_ZN10TScheduler10RescheduleEv);	// do reschedule and return to irq_resched_return
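	// TScheduler::Reschedule returns to irq_resched_return (lr was pointed at it
	// above), which then restores the interrupted context from the frame built here.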

	asm("FiqExit0:");						// also entry point for __ArmVectorFiqPostambleNoResched()
	USER_MEMORY_GUARD_RESTORE(r12,lr);
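	// When user memory guards are enabled, r12 still holds the DACR popped on entry;
	// the restore macro is assumed to compile to nothing when they are disabled,
	// which is why it can sit outside the #ifdef.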

#ifndef BTRACE_CPU_USAGE
	ERRATUM_353494_MODE_CHANGE(,r11);
	asm("ldmfd sp!, {pc}^ ");				// return from interrupt
#else
	asm("ldrb r8, [r9,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
	asm("mov r10, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EFiqEnd<<BTrace::ESubCategoryIndex*8)) );
	asm("adr lr, FiqTraceExit0");
	asm("cmp r8, #0");
	ERRATUM_353494_MODE_CHANGE(eq,r8);
	asm("ldmeqfd sp!, {pc}^ ");				// return from interrupt if trace not enabled
	asm("stmfd sp!, {r0-r3} ");
	asm("add r0, r10, #%a0" : : "i" ((TInt)4 ) );
	asm("ldr pc, [r9,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("FiqTraceExit0:");
	ERRATUM_353494_MODE_CHANGE(,r3);
	asm("ldmfd sp!, {r0-r3,pc}^ ");			// return from interrupt
#endif

	asm("__TheScheduler: ");
	asm(".word TheScheduler ");
	}

	asm("ldmib r3, {r13,r14}^ ");			// restore sp_usr and lr_usr
	asm("add r1, r3, #12 ");				// r1 points to saved r0-r3,r12,pc
	asm("mov r3, #0xd3 ");
	asm("msr cpsr, r3 ");					// mode_svc, all interrupts off
	asm("msr spsr, r12 ");					// restore spsr_svc
	asm("tst r0, #0x0f ");					// check if exception in mode_usr
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
	asm("nop ");							// ARM Cortex-A9 MPCore erratum 571622 workaround
	asm("nop ");							// Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
#ifdef __CHECK_LOCK_STATE__
	asm("bleq " CSM_Z14CheckLockStatev);
	asm("tst r0, #0x0f ");					// recheck if exception in mode_usr
#endif
	asm("bne 1f ");

#ifdef __USER_MEMORY_GUARDS_ENABLED__
	USER_MEMORY_GUARD_ON(,lr,r12);
	asm("tst lr, #0xc0000000 ");			// user memory enabled?
	asm("adrne lr, 2f ");					// yes - enable it after callbacks
#endif
	asm("adreq lr, 1f ");					// no - leave it disabled after callbacks
	asm("mov r3, #0 ");
	asm("b callUserModeCallbacks2 ");		// call user-mode callbacks
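	// callUserModeCallbacks2 returns through the lr value chosen above: to '2:'
	// (re-enable user memory access) or to '1:' (leave it disabled).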
	asm("2: ");
	USER_MEMORY_GUARD_OFF(,lr,lr);

	asm("1: ");
	asm("tst r0, #0x0f ");					// check if exception in mode_usr
	asm("mov r3, #%a0 " : : "i" ((TInt)NThread::EContextUndefined));
	asm("streqb r3, [r2, #%a0]" : : "i" _FOFF(NThread,iSpare3));	// if so, set iUserContextType = EContextUndefined
	asm("add sp, r1, #24 ");				// restore mode_svc stack balance
	asm("mov r2, #0xd7 ");