// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\nk_irq.cpp
//
//

/**
 @file
 @internalTechnology
*/

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"
#include <nk_irq.h>

NIrq		Irq[NK_MAX_IRQS];
NIrqHandler	Handlers[NK_MAX_IRQ_HANDLERS];
NIrqHandler* NIrqHandler::FirstFree;

extern "C" void send_irq_ipi(TSubScheduler*);

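// Advance the handle cookie 'p' by n steps of a linear feedback shift register
// sequence (effectively a 15-bit LFSR: the bit fed back in is the XOR of the
// two most significant bits of the current state). Starting from a nonzero
// value the state never reaches zero, so cookies stay nonzero; a stale handle
// whose cookie has since been stepped no longer matches the current one.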
void StepCookie(volatile TUint16& p, TInt n)
	{
	TUint32 x = p<<17;
	while(n--)
		{
		TUint32 y = x;
		x<<=1;
		y^=x;
		x |= ((y>>31)<<17);
		}
	p = (TUint16)(x>>17);
	}

NIrq::NIrq()
	:	iNIrqLock(TSpinLock::EOrderNIrq)
	{
	iIState = EWait;
	iEventsPending = 0;
	iEnabledEvents = 0;
	iHwId = 0;
	iX = 0;
	}

TInt NIrq::BindRaw(NIsr aIsr, TAny* aPtr)
	{
	// Call only from thread context
	TInt r = KErrNone;
	Wait();
	iNIrqLock.LockOnly();
	if (iStaticFlags & EShared)
		{
		r = KErrAccessDenied;
		goto error;
		}
	if ( (iIState & ERaw) || !iHandlers.IsEmpty())
		{
		r = KErrInUse;
		goto error;
		}
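	// No handler queue is in use for a raw ISR, so stash the ISR function
	// pointer and its argument in the queue anchor's link fields instead;
	// HwIsr() retrieves them from the same place.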
	iHandlers.iA.iNext = (SDblQueLink*)aIsr;
	iHandlers.iA.iPrev = (SDblQueLink*)aPtr;
	__e32_atomic_ior_rel32(&iIState, ERaw);
error:
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}

TInt NIrq::UnbindRaw()
	{
	// Call only from thread context
	TInt r = DisableRaw(TRUE);
	if (r != KErrNone)
		return r;
	Wait();
	iNIrqLock.LockOnly();
	if (iIState & ERaw)
		{
		iHandlers.iA.iNext = 0;
		iHandlers.iA.iPrev = 0;
		++iGeneration;	// release anyone still waiting in Disable()
		__e32_atomic_and_rel32(&iIState, ~(ERaw|EUnbind));
		}
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}

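// Note on iEnabledEvents: bit 0 appears to act as a 'hardware interrupt
// forced off' flag (set here on a raw disable, and by HwIsr() while
// level-triggered events are still pending), while bits 1 and up count the
// enabled handlers in units of 2 - see NIrqHandler::Enable()/Disable().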
TInt NIrq::DisableRaw(TBool aUnbind)
	{
	TBool wait = FALSE;
	TInt r = KErrNone;
	TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		r = KErrGeneral;
	else
		{
		wait = TRUE;
		if (aUnbind)
			__e32_atomic_ior_acq32(&iIState, EUnbind);
		if (!(iEnabledEvents & 1))
			{
			iEnabledEvents |= 1;
			HwDisable();
//			wait = TRUE;
			}
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
	TInt c = NKern::CurrentContext();
	if (wait && c!=NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		if (c==NKern::EThread)
			NKern::ThreadEnterCS();
		HwWaitCpus();	// ensure other CPUs have had a chance to accept any outstanding interrupts
		TUint32 g = iGeneration;
		while ( ((iIState >> 16) || HwPending()) && (iGeneration == g))
			{
			__chill();
			}
		if (c==NKern::EThread)
			NKern::ThreadLeaveCS();
		}
	return r;
	}

TInt NIrq::EnableRaw()
	{
	TInt r = KErrNone;
	TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		r = KErrGeneral;
	else if (iIState & EUnbind)
		r = KErrNotReady;
	else if (iEnabledEvents & 1)
		{
		iEnabledEvents = 0;
		HwEnable();
		++iGeneration;
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
	return r;
	}

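// Attach a handler to this interrupt. Returns KErrInUse if the vector is
// raw-bound, and KErrAccessDenied if a handler is already attached and
// either the vector is not marked shared or one of the handlers demands
// exclusive use.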
TInt NIrq::Bind(NIrqHandler* aH)
	{
	// Call only from thread context
	TInt r = KErrInUse;
	Wait();
	if (!(iIState & ERaw))
		{
		r = KErrNone;
		TBool empty = iHandlers.IsEmpty();
		TBool shared = iStaticFlags & EShared;
		TBool exclusive = iIState & NIrqHandler::EExclusive;
		if (!empty)
			{
			if (!shared || exclusive)
				{
				r = KErrAccessDenied;
				goto error;
				}
			NIrqHandler* h = _LOFF(iHandlers.First(), NIrqHandler, iIrqLink);
			if (h->iHState & NIrqHandler::EExclusive)
				{
				r = KErrAccessDenied;
				goto error;
				}
			}
		aH->iIrq = this;
		iHandlers.Add(&aH->iIrqLink);
		}
error:
	Done();
	return r;
	}

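// Hardware ISR entry point for this interrupt. The top 16 bits of iIState
// hold a run count (incremented by EnterIsr(), decremented by IsrDone()),
// which is how DisableRaw() and NIrqHandler::Disable() detect a handler in
// progress. For edge-triggered interrupts EOI is issued before the handlers
// run; for level-triggered ones it is deferred until the handlers have been
// activated, and the interrupt may additionally be masked until all queued
// event handlers have completed.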
void NIrq::HwIsr()
	{
	TRACE_IRQ12(16, this, iVector, iIState);
	TBool eoi_done = FALSE;
	TUint32 rcf0 = EnterIsr();		// for initial run count
	TUint32 rcf1 = iIState;			// might have changed while we were waiting in EnterIsr()
	if (rcf1 & ERaw)
		{
		if (!(rcf1 & EUnbind))
			{
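			// raw ISR: BindRaw() stashed the function pointer and its
			// argument in the handler queue anchor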
			NIsr f = (NIsr)iHandlers.iA.iNext;
			TAny* p = iHandlers.iA.iPrev;
			(*f)(p);
			}
		HwEoi();
		IsrDone();
		return;
		}
	if (rcf0 >> 16)
		{
		HwEoi();
		return;
		}
	if (!(iStaticFlags & ELevel))
		{
		eoi_done = TRUE;
		HwEoi();
		}
	do	{
		// Handler list can't be touched now
		SDblQueLink* anchor = &iHandlers.iA;
		SDblQueLink* p = anchor->iNext;
		while (p != anchor)
			{
			NIrqHandler* h = _LOFF(p, NIrqHandler, iIrqLink);
			h->Activate(1);
			p = p->iNext;
			}
		if (!eoi_done)
			{
			eoi_done = TRUE;
			HwEoi();
			}
		if ((iStaticFlags & ELevel) && iEventsPending)
			{
			// For a level-triggered interrupt, make sure the interrupt is
			// disabled until all pending event handlers have run, to avoid
			// a continuous reassertion of the interrupt.
			TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
			if (iEventsPending)
				{
				iEnabledEvents |= 1;
				HwDisable();
				}
			__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
			}
		} while (IsrDone());
	}

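// Mark this handler active and run it. Called from NIrq::HwIsr() on the CPU
// that took the interrupt (aCount=1), and from Enable() when remembered
// occurrences are replayed (aCount=0). If the handler is tied to a thread or
// group currently attached to another CPU, the event is queued to that CPU
// instead of running here.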
void NIrqHandler::Activate(TInt aCount)
	{
	TUint32 orig = DoActivate(aCount);
	TRACE_IRQ12(17, this, orig, aCount);
	if (orig & (EDisable|EUnbind|EActive))
		return;	// disabled or already active
	if (iTied)
		{
		// We need to enforce mutual exclusion between the event handler
		// and the tied thread or thread group, so the event handler must
		// run on the CPU to which the thread or group is currently attached.
		// Once the event has been attached to that CPU, the thread/group
		// can't be migrated until the event handler completes.
		// We need a pending event count for the tied thread/group
		// so we know when the thread/group can be migrated.
		TInt tied_cpu = iTied->BeginTiedEvent();
		TInt this_cpu = NKern::CurrentCpu();
		if (tied_cpu != this_cpu)
			{
			__e32_atomic_add_acq32(&iIrq->iEventsPending, 1);
			TheSubSchedulers[tied_cpu].QueueEventAndKick(this);
			// FIXME: move IRQ over to tied CPU if this is the only handler for that IRQ
			//			what to do about shared IRQs?
			return;
			}
		}
	// event can run on this CPU so run it now
	if (aCount)
		{
		orig = EventBegin();
		TRACE_IRQ8(18, this, orig);
		(*iFn)(iPtr);
		orig = EventDone();
		TRACE_IRQ8(19, this, orig);
		if (!(orig & EActive))
			{
			if (iTied)
				iTied->EndTiedEvent();
			return;	// that was the last occurrence, or the event is now disabled
			}
		}
	__e32_atomic_add_ord32(&iIrq->iEventsPending, 1);
	// add event to this CPU's queue
	SubScheduler().QueueEventAndKick(this);
	}


NIrqHandler::NIrqHandler()
	{
	iIrqLink.iNext = 0;
	iIrq = 0;
	iTied = 0;
	iHState = EDisable|EBind|ENotReady|EEventHandlerIrq;
	iFn = 0;
	iPtr = 0;
	memclr(iNIrqHandlerSpare, sizeof(iNIrqHandlerSpare));
	}

void NIrqHandler::Free()
	{
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	if (!iTied)	// Only free if iTied has been cleared
		{
		iIrqLink.iNext = FirstFree;
		FirstFree = this;
		}
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	}

NIrqHandler* NIrqHandler::Alloc()
	{
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	NIrqHandler* p = FirstFree;
	if (p)
		FirstFree = (NIrqHandler*)p->iIrqLink.iNext;
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	if (p)
		new (p) NIrqHandler();
	return p;
	}

TInt NIrqHandler::Enable(TInt aHandle)
	{
	// call from any context
	TBool reactivate = FALSE;
	TInt r = KErrNotReady;
	NIrq* pI = iIrq;
	if (!pI)
		return KErrNotReady;
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq==pI && TUint(aHandle)==iHandle)	// check handler not unbound
		{
		TUint32 orig = DoSetEnabled();	// clear EDisable and EBind provided neither EUnbind nor ENotReady set
		if (!(orig & (EUnbind|ENotReady)))
			{
			r = KErrNone;
			if (orig & EDisable)	// check not already enabled
				{
				++iGeneration;
				TUint32 n = pI->iEnabledEvents;
				pI->iEnabledEvents += 2;
				if (n==0)
					pI->HwEnable();	// enable HW interrupt if this is first handler to be enabled
				if ((orig >> 16) && !(orig & EActive))
					// replay remembered interrupt(s)
					reactivate = TRUE;
				}
			}
		}
	if (reactivate)
		{
		pI->iNIrqLock.UnlockOnly();
		Activate(0);
		pI->iNIrqLock.LockOnly();
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	return r;
	}

TInt NIrqHandler::Disable(TBool aUnbind, TInt aHandle)
	{
	// call from any context
	NIrq* pI = iIrq;
	if (!pI)
		return KErrGeneral;
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq != pI || TUint(aHandle)!=iHandle)	// check handler not unbound
		{
		__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
		return KErrGeneral;
		}
	TInt r = aUnbind ? KErrGeneral : KErrNone;
	TUint32 f = aUnbind ? EUnbind|EDisable : EDisable;
	TUint32 orig = __e32_atomic_ior_acq32(&iHState, f);
	TUint32 g = iGeneration;
	if (!(orig & EDisable))	// check not already disabled
		{
		pI->iEnabledEvents -= 2;
		if (!pI->iEnabledEvents)
			pI->HwDisable();	// disable HW interrupt if no more enabled handlers
		}
	if (aUnbind && !(orig & EUnbind))
		{
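		// Step the cookie half of the handle (the upper 16 bits of iHandle,
		// addressed here by byte offset - note this assumes a little-endian
		// layout) so any outstanding copies of the old handle become stale.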
		volatile TUint16& cookie = *(volatile TUint16*)(((TUint8*)&iHandle)+2);
		StepCookie(cookie, 1);
		r = KErrNone;
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	if (NKern::CurrentContext() != NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		while ((iHState & EActive) && (iGeneration == g))
			{
			__chill();
			}
		}
	return r;
	}

TInt NIrqHandler::Unbind(TInt aId, NSchedulable* aTied)
	{
	TInt r = Disable(TRUE, aId);	// waits for any current activation of ISR to finish
	if (r==KErrNone || aTied)	// returns KErrGeneral if someone else already unbound this interrupt handler
		{
		// Possible race condition here between tied thread termination and interrupt unbind.
		// We need to ensure that the iTied field is NULL before the tied thread/group
		// is destroyed.
		NKern::Lock();
		NEventHandler::TiedLock.LockOnly();	// this guarantees pH->iTied cannot change
		NSchedulable* t = iTied;
		if (t)
			{
			// We need to guarantee the object pointed to by t cannot be deleted until we
			// have finished with it.
			t->AcqSLock();
			if (iTiedLink.iNext)
				{
				iTiedLink.Deque();
				iTiedLink.iNext = 0;
				iTied = 0;
				}
			if (aTied && aTied==t)
				iTied = 0;
			t->RelSLock();
			}
		NEventHandler::TiedLock.UnlockOnly();
		NKern::Unlock();
		}
	if (r==KErrNone)
		{
		DoUnbind();
		Free();
		}
	return r;
	}

void NIrqHandler::DoUnbind()
	{
	// Call only from thread context
	NIrq* pI = iIrq;
	pI->Wait();
	iIrqLink.Deque();
	iIrq = 0;
	pI->Done();
	}

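// Add an event handler to this CPU's event queue. Returns TRUE if the queue
// was previously empty, i.e. if the caller still needs to kick this CPU so
// that it notices and drains the queue.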
TBool TSubScheduler::QueueEvent(NEventHandler* aEvent)
	{
	TInt irq = __SPIN_LOCK_IRQSAVE(iEventHandlerLock);
	TBool pending = iEventHandlersPending;
	iEventHandlersPending = TRUE;
	iEventHandlers.Add(aEvent);
	__SPIN_UNLOCK_IRQRESTORE(iEventHandlerLock,irq);
	return !pending;
	}

void TSubScheduler::QueueEventAndKick(NEventHandler* aEvent)
	{
	if (QueueEvent(aEvent))
		{
		// extra barrier ?
		send_irq_ipi(this);
		}
	}

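// Drain this CPU's event handler queue. Runs on the CPU that owns aS once
// QueueEventAndKick() has signalled work (the kick being an IPI to that CPU),
// presumably on the interrupt exit path. NTimer expiries and IRQ event
// handlers are dispatched here; a handler that is still active afterwards is
// requeued rather than touched again.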
extern "C" void run_event_handlers(TSubScheduler* aS)
	{
	while (aS->iEventHandlersPending)
		{
		TInt irq = __SPIN_LOCK_IRQSAVE(aS->iEventHandlerLock);
		if (aS->iEventHandlers.IsEmpty())
			{
			aS->iEventHandlersPending = FALSE;
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			break;
			}
		NIrqHandler* h = (NIrqHandler*)aS->iEventHandlers.First()->Deque();
		if (aS->iEventHandlers.IsEmpty())
			aS->iEventHandlersPending = FALSE;
		TInt type = h->iHType;
		NSchedulable* tied = h->iTied;
		if (type == NEventHandler::EEventHandlerNTimer)
			{
			NEventFn f = h->iFn;
			TAny* p = h->iPtr;
			mb();	// make sure dequeue observed and iFn,iPtr,iTied sampled before state change observed
			h->i8888.iHState1 = NTimer::EIdle; // can't touch timer again after this
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			(*f)(p);
			if (tied)
				tied->EndTiedEvent();
			continue;
			}
		__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
		TBool requeue = TRUE;
		switch (h->iHType)
			{
			case NEventHandler::EEventHandlerIrq:
				{
				TUint32 orig;
				// event can run on this CPU so run it now
				// if event tied, migration of tied thread/group will have been blocked
				orig = h->EventBegin();
				TRACE_IRQ8(20, h, orig);
				(*h->iFn)(h->iPtr);
				TRACE_IRQ4(21, h);
				if (!(h->iHState & NIrqHandler::ERunCountMask))	// if run count still nonzero, definitely still active
					{
					NIrq* pI = h->iIrq;
					irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);
					orig = h->EventDone();
					TRACE_IRQ8(22, h, orig);
					if (!(orig & NIrqHandler::EActive))
						{
						// handler is no longer active - can't touch it again
						// pI is OK since NIrq's are never deleted/reused
						requeue = FALSE;
						if (__e32_atomic_add_rel32(&pI->iEventsPending, TUint32(-1)) == 1)
							{
							if (pI->iEnabledEvents & 1)
								{
								pI->iEnabledEvents &= ~1;
								if (pI->iEnabledEvents)
									pI->HwEnable();
								}
							}
						}
					__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
					}
				break;
				}
			default:
				__KTRACE_OPT(KPANIC,DEBUGPRINT("h=%08x",h));
				__NK_ASSERT_ALWAYS(0);
			}
		if (tied && !requeue)
			{
			// If the tied thread/group has no more tied events outstanding
			// and has a migration pending, trigger the migration now.
			// Atomically change the tied_cpu to the target CPU here. An IDFC
			// can then effect the migration.
			// Note that the tied code can't run in parallel with us until
			// the tied_cpu is changed. However it could run as soon as the
			// tied_cpu is changed (e.g. if added to ready list after change)
			tied->EndTiedEvent();
			}
		if (requeue)
			{
			// still pending so put it back on the queue
			// leave interrupt disabled (if so) and migration of tied thread/group blocked
			aS->QueueEvent(h);
			}
		}
	}

/******************************************************************************
 * Public interrupt management functions
 ******************************************************************************/

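// Boot-time initialisation: put every NIrqHandler on the free list, giving
// each one a distinct handle of the form (cookie << 16) | index, where the
// index identifies the slot in ::Handlers and the 16-bit cookie is produced
// by stepping the LFSR. Unbinding steps the cookie again, so a handle from a
// previous binding of the same slot no longer validates.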
void NKern::InterruptInit0()
	{
	TInt i;
	TUint16 cookie = 1;
	NIrqHandler::FirstFree = 0;
	for (i=NK_MAX_IRQ_HANDLERS-1; i>=0; --i)
		{
		StepCookie(cookie, 61);
		NIrqHandler* h = &::Handlers[i];
		__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrqHandler[%d] at %08x", i, h));
		h->iGeneration = 0;
		h->iHandle = (cookie << 16) | i;
		h->iIrqLink.iNext = NIrqHandler::FirstFree;
		NIrqHandler::FirstFree = h;
		}
	NIrq::HwInit0();
	}

EXPORT_C TInt NKern::InterruptInit(TInt aId, TUint32 aFlags, TInt aVector, TUint32 aHwId, TAny* aExt)
	{
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NKII: ID=%02x F=%08x V=%03x HWID=%08x X=%08x", aId, aFlags, aVector, aHwId, aExt));
	TRACE_IRQ12(0, (aId|(aVector<<16)), aFlags, aHwId);
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		return KErrArgument;
	NIrq* pI = &Irq[aId];
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrq[%02x] at %08x", aId, pI));
	TRACE_IRQ8(1, aId, pI);
	new (pI) NIrq;
	pI->iX = (NIrqX*)aExt;
	pI->iIndex = (TUint16)aId;
	pI->iHwId = aHwId;
	pI->iVector = aVector;
	pI->iStaticFlags = (TUint16)(aFlags & 0x13);
	if (aFlags & NKern::EIrqInit_Count)
		pI->iIState |= NIrq::ECount;
	pI->HwInit();
	__e32_atomic_and_rel32(&pI->iIState, ~NIrq::EWait);
	return KErrNone;
	}

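// Bind an ISR to interrupt aId and, on success, return a handle for use with
// InterruptEnable()/InterruptDisable()/InterruptUnbind(). With
// NKern::EIrqBind_Raw the ISR is attached directly to the vector (no
// NIrqHandler object, tying not supported, result 0 on success). A sketch of
// typical driver usage (MyIsr and aDriver are hypothetical):
//
//		TInt h = NKern::InterruptBind(anId, &MyIsr, aDriver, 0, 0);
//		if (h > 0)
//			NKern::InterruptEnable(h);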
EXPORT_C TInt NKern::InterruptBind(TInt aId, NIsr aIsr, TAny* aPtr, TUint32 aFlags, NSchedulable* aTied)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIB: ID=%02x ISR=%08x(%08x) F=%08x T=%T", aId, aIsr, aPtr, aFlags, aTied));
	TRACE_IRQ12(2, aId, aIsr, aPtr);
	TRACE_IRQ12(3, aId, aFlags, aTied);
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptBind");
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		{
		TRACE_IRQ8(4, aId, KErrArgument);
		return KErrArgument;
		}
	NIrq* pI = &Irq[aId];
	NIrqHandler* pH = 0;
	NSchedulable* pT = 0;
	if (aFlags & NKern::EIrqBind_Tied)
		{
		if (!aTied)
			aTied = NKern::CurrentThread();
		pT = aTied;
		}
	TInt r = KErrNoMemory;
	TInt handle = 0;
	NKern::ThreadEnterCS();
	if (!(aFlags & NKern::EIrqBind_Raw))
		{
		pH = NIrqHandler::Alloc();
		if (!pH)
			goto out;
		pH->iFn = aIsr;
		pH->iPtr = aPtr;
		__e32_atomic_add_ord32(&pH->iGeneration, 1);
		if (aFlags & EIrqBind_Exclusive)
			pH->iHState |= NIrqHandler::EExclusive;
		if (aFlags & EIrqBind_Count)
			pH->iHState |= NIrqHandler::ECount;
		r = pI->Bind(pH);
		if (r==KErrNone)
			{
			handle = pH->iHandle;
			// We assume that aTied cannot disappear entirely before we return
			if (pT)
				{
				NKern::Lock();
				r = pT->AddTiedEvent(pH);
				NKern::Unlock();
				}
			if (r!=KErrNone)
				{
				// unbind
				pH->DoUnbind();
				}
			}
		if (r!=KErrNone)
			pH->Free();
		}
	else
		{
		if (aFlags & NKern::EIrqBind_Tied)
			r = KErrNotSupported;
		else
			r = pI->BindRaw(aIsr, aPtr);
		}
out:
	if (r==KErrNone)
		{
		// clear ENotReady so handler can be enabled (pH is 0 for a raw binding)
		if (pH)
			__e32_atomic_and_rel32(&pH->iHState, ~NIrqHandler::ENotReady);
		r = handle;
		}
	NKern::ThreadLeaveCS();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("<NKIB: %08x", r));
	TRACE_IRQ8(4, aId, r);
	return r;
	}

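// Decode a handle or interrupt id. A value with a nonzero cookie field is
// treated as a full handler handle (index plus cookie, validated against the
// NIrqHandler's current iHandle); otherwise it is taken as a bare IRQ number,
// which is acceptable for a raw-bound vector, or for a non-shared vector with
// exactly one handler, in which case aHandle is upgraded to the full handle.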
TInt NIrq::FromHandle(TInt& aHandle, NIrq*& aIrq, NIrqHandler*& aHandler)
	{
	TRACE_IRQ4(5, aHandle);
	aIrq = 0;
	aHandler = 0;
	NIrqHandler* pH = 0;
	NIrqHandler* pH2 = 0;
	NIrq* pI = 0;
	SDblQueLink* anchor = 0;
	TUint32 i;
	TInt r = KErrArgument;
	if (aHandle & NKern::EIrqCookieMask)
		{
		i = aHandle & NKern::EIrqIndexMask;
		if (i>=NK_MAX_IRQ_HANDLERS)
			goto out;
		pH = &::Handlers[i];
		if (pH->iHandle != TUint(aHandle))
			goto out;
		aHandler = pH;
		aIrq = pH->iIrq;
		r = KErrNone;
		goto out;
		}
	if (TUint32(aHandle)>=NK_MAX_IRQS)
		goto out;
	pI = &::Irq[aHandle];
	if (pI->iIState & NIrq::ERaw)
		{
		aIrq = pI;
		r = KErrNone;
		goto out;
		}
	if (pI->iStaticFlags & NIrq::EShared)
		goto out;
	anchor = &pI->iHandlers.iA;
	pH = _LOFF(anchor->iNext, NIrqHandler, iIrqLink);
	i = pH - ::Handlers;
	if (i>=NK_MAX_IRQ_HANDLERS)
		goto out;
	pH2 = &::Handlers[i];
	if (pH2 != pH)
		goto out;
	if (pH->iIrq != pI || anchor->iPrev != anchor->iNext)
		goto out;
	aHandle = pH->iHandle;
	aHandler = pH;
	aIrq = pI;
	r = KErrNone;
out:
	TRACE_IRQ4(6, r);
	TRACE_IRQ12(7, aHandle, aIrq, aHandler);
	return r;
	}

EXPORT_C TInt NKern::InterruptUnbind(TInt aId)
	{
	TRACE_IRQ4(8, aId);
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIU: ID=%08x", aId));
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptUnbind");
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r!=KErrNone)
		return r;
	NKern::ThreadEnterCS();
	if (!pH)
		{
		// raw ISR
		r = pI->UnbindRaw();
		}
	else
		{
		r = pH->Unbind(aId, 0);
		}
	NKern::ThreadLeaveCS();
	TRACE_IRQ4(9, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptEnable(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIE: ID=%08x", aId));
	TRACE_IRQ4(10, aId);
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r==KErrNone)
		r = pH ? pH->Enable(aId) : pI->EnableRaw();
	TRACE_IRQ4(11, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptDisable(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKID: ID=%08x", aId));
	TRACE_IRQ4(12, aId);
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r==KErrNone)
		r = pH ? pH->Disable(FALSE, aId) : pI->DisableRaw(FALSE);
	TRACE_IRQ4(13, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptClear(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIC: ID=%08x", aId));
	return KErrNotSupported;
	}

EXPORT_C TInt NKern::InterruptSetPriority(TInt aId, TInt aPri)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIS: ID=%08x PRI=%08x", aId, aPri));
	return KErrNotSupported;
	}

EXPORT_C TInt NKern::InterruptSetCpuMask(TInt aId, TUint32 aMask)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIM: ID=%08x M=%08x", aId, aMask));
	return KErrNotSupported;
	}

EXPORT_C void NKern::Interrupt(TInt aId)
	{
	__NK_ASSERT_ALWAYS(TUint(aId) < TUint(NK_MAX_IRQS));
	NIrq* pI = &Irq[aId];
	pI->HwIsr();
	}