Thursday, July 06, 2006

irq thread

arch/blackfin/kernel/irqchip.c

do_irqd() {
	while (!kthread_should_stop()) {
		if (!down_interruptible(&desc->thrsem)) {
			local_irq_disable();
			desc->thrhandler(irq, &__ipipe_tick_regs[cpu]);
			local_irq_enable();
		}
	}
}

/----------------------
Note: the semaphore here acts as a counter, so multiple interrupts are not lost: each wake-up posted while the thread is busy is remembered and handled by one more pass through the loop.
-----------------------/
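To make the counting behaviour concrete, here is a minimal userspace sketch (POSIX semaphores, not the kernel's struct semaphore; the names and counts are made up for illustration): five posts from the "ISR" side always yield five handler runs in the thread, even when they arrive in a burst.

/* Userspace sketch only: a counting semaphore remembers how many times
 * it was posted, so a burst of "interrupts" still results in the same
 * number of handler runs. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t thrsem;
static int handled;

static void *irq_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 5; i++) {
		sem_wait(&thrsem);	/* like down_interruptible(&desc->thrsem) */
		handled++;		/* stands in for calling desc->thrhandler() */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&thrsem, 0, 0);
	pthread_create(&t, NULL, irq_thread, NULL);

	/* the "ISR" fires five times in a burst; each post is remembered */
	for (int i = 0; i < 5; i++)
		sem_post(&thrsem);

	pthread_join(t, NULL);
	printf("handled %d of 5 interrupts\n", handled);	/* prints 5 */
	return 0;
}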

ENTRY(return_from_int)
/* If someone else already raised IRQ 15, do nothing. */
csync;
p2.l = lo(ILAT);
p2.h = hi(ILAT);
r0 = [p2];
cc = bittst (r0, EVT_IVG15_P);
if cc jump 2f;

/------------------
Check whether IVG15 is already latched in ILAT; if so, there is nothing to do.
-------------------/

/* if not return to user mode, get out */
p2.l = lo(IPEND);
p2.h = hi(IPEND);
r0 = [p2];
r1 = 0x17(Z);
r2 = ~r1;
r2.h = 0;
/--------
r2.l = ~0x17, r2.h = 0: a mask that drops the EMU, RST, NMI and global-disable bits of IPEND.
---------/
r0 = r2 & r0;
r1 = 1;
r1 = r0 - r1;
r2 = r0 & r1;
/------------
Don't care about EMU, RST, NMI, Global Interrupt Disable
-------------/
cc = r2 == 0;
if !cc jump 2f;
/--------------------
r2 = r0 & (r0 - 1) is non-zero iff more than one IPEND bit is still set, i.e. we are returning into a nested interrupt rather than to user mode; in that case jump to 2f and skip raising IVG15 (see the C sketch after this routine).
---------------------/

/* Lower the interrupt level to 15. */
p0.l = lo(EVT15);
p0.h = hi(EVT15);
p1.l = schedule_and_signal_from_int;
p1.h = schedule_and_signal_from_int;
[p0] = p1;
csync;
r0 = 0x801f (z);
STI r0;
raise 15; /* raise evt15 to do signal or reschedule */
rti;
2:
rts;
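The nested-interrupt test above relies on a standard bit trick: x & (x - 1) clears the lowest set bit, so it is zero exactly when at most one bit is set. A small hedged sketch of the same check in C (the IPEND values are hypothetical):

/* Minimal sketch of the bit trick used in return_from_int above: after
 * masking off the don't-care bits, IPEND should have at most one bit
 * set (the current interrupt's own bit) if we are about to return to
 * user mode. */
#include <stdio.h>

static int at_most_one_bit_set(unsigned int x)
{
	return (x & (x - 1)) == 0;	/* the same test cc performs above */
}

int main(void)
{
	unsigned int mask = ~0x17u & 0xffffu;	/* drop EMU, RST, NMI, global disable */
	unsigned int not_nested = 0x0080;	/* hypothetical IPEND: only IVG7 pending */
	unsigned int nested = 0x0480;		/* hypothetical IPEND: IVG7 interrupted by IVG10 */

	printf("%d\n", at_most_one_bit_set(not_nested & mask));	/* 1 -> raise IVG15 */
	printf("%d\n", at_most_one_bit_set(nested & mask));		/* 0 -> jump 2f */
	return 0;
}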


/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
/--------------------
irq: starts from 7.

This function can be called from:

ipipe_grab_irq()    - hardware interrupt entry
ipipe_trigger_irq() - software-triggered IRQ
---------------------/

struct ipipe_domain *this_domain;
struct list_head *head, *pos;
int m_ack, s_ack, s = -1;
ipipe_declare_cpuid;

m_ack = (regs == NULL); /* Software-triggered IRQs do not need
* any ack. */

/--------------------
A software-triggered IRQ has no regs.
See:
ipipe-core.c: ipipe_trigger_irq(): __ipipe_handle_irq(irq, NULL);
---------------------/


ipipe_load_cpuid();

this_domain = ipipe_percpu_domain[cpuid];
/-------------------------

struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
{[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };

What does "ipipe_percpu_domain[cpuid]" stand for? It is the domain currently running on this CPU; as the initializer shows, it starts out as the root (Linux) domain.

--------------------------/

s_ack = m_ack;

if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
head = &this_domain->p_link;
else
head = __ipipe_pipeline.next;

/----------------------------
__ipipe_pipeline: global list of domains, in priority order (highest priority first).
Where does head start? If the IRQ is marked sticky for the current domain, the walk starts at the current domain itself; otherwise it starts at __ipipe_pipeline.next, the highest-priority domain in the pipeline.
-----------------------------/

/* Ack the interrupt. */

pos = head;

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/*
* For each domain handling the incoming IRQ, mark it as
* pending in its log.
*/
if (test_bit(IPIPE_HANDLE_FLAG,
&next_domain->irqs[irq].control)) {
/*
* Domains that handle this IRQ are polled for
* acknowledging it by decreasing priority order. The
* interrupt must be made pending _first_ in the
* domain's status flags before the PIC is unlocked.
*/

next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
__ipipe_set_irq_bit(next_domain, cpuid, irq);
/--------------------

struct ipcpudata {
	unsigned long status;
	unsigned long irq_pending_hi;
	unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
	struct ipirqcnt {
		unsigned long pending_hits;
		unsigned long total_hits;
	} irq_counters[IPIPE_NR_IRQS];
} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];

#define __ipipe_set_irq_bit(ipd, cpuid, irq) \
do { \
	if (!test_bit(IPIPE_LOCK_FLAG, &(ipd)->irqs[irq].control)) { \
		__set_bit(irq & IPIPE_IRQ_IMASK, \
			  &(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
		__set_bit(irq >> IPIPE_IRQ_ISHIFT, \
			  &(ipd)->cpudata[cpuid].irq_pending_hi); \
	} \
} while (0)

This is a two-level pending bitmap: if bit a of irq_pending_hi is set, word irq_pending_lo[a] has at least one pending IRQ, and each set bit b in that word encodes irq = a * 32 + b (see the decode sketch after this function).

--------------------/



ipipe_mark_irq_receipt(next_domain, irq, cpuid);

/---------
Only meaningful when CONFIG_IPIPE_STATS is enabled.
-------------/


/*
* Always get the first master acknowledge available.
* Once we've got it, allow slave acknowledge
* handlers to run (until one of them stops us).
*/
if (next_domain->irqs[irq].acknowledge != NULL) {
if (!m_ack)
m_ack = next_domain->irqs[irq].acknowledge(irq);
else if (test_bit
(IPIPE_SHARED_FLAG,
&next_domain->irqs[irq].control) && !s_ack)
s_ack = next_domain->irqs[irq].acknowledge(irq);
}
}


/----------------------

What are m_ack and s_ack for? They make sure the interrupt controller is acknowledged at most once per path: m_ack is set once the first handling domain's acknowledge handler has run (the "master" ack), and s_ack then allows a domain that shares the IRQ to run its own ("slave") acknowledge handler once as well.

------------------------/

/*
* If the domain does not want the IRQ to be passed down the
* interrupt pipe, exit the loop now.
*/

if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
break;

pos = next_domain->p_link.next;
}

/*
* Now walk the pipeline, yielding control to the highest
* priority domain that has pending interrupt(s) or
* immediately to the current domain if the interrupt has been
* marked as 'sticky'. This search does not go beyond the
* current domain in the pipeline. We also enforce the
* additional root stage lock (blackfin-specific). */

if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
s = __test_and_set_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);

__ipipe_walk_pipeline(head, cpuid);

if (!s)
__clear_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);
}
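The __ipipe_set_irq_bit() note above describes a two-level pending bitmap. Here is a hedged userspace sketch of the encode/decode cycle, assuming 32 IRQs per word (i.e. IPIPE_IRQ_ISHIFT == 5) and ignoring the pending_hits counting; the decode side is essentially what __ipipe_sync_stage() does further below with ffs().

/* Userspace sketch of the two-level pending bitmap: the encode side
 * mirrors __ipipe_set_irq_bit(), the decode side mirrors the ffs()
 * walk in __ipipe_sync_stage(). Names and sizes are made up. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define IRQ_WORDS 8	/* like IPIPE_IRQ_IWORDS, value made up */

static unsigned int pending_hi;
static unsigned int pending_lo[IRQ_WORDS];

static void mark_pending(unsigned int irq)
{
	pending_lo[irq >> 5] |= 1u << (irq & 31);
	pending_hi |= 1u << (irq >> 5);
}

static int next_pending(void)	/* returns -1 when nothing is pending */
{
	if (pending_hi == 0)
		return -1;

	int level = ffs(pending_hi) - 1;		/* word with pending IRQs */
	int rank = ffs(pending_lo[level]) - 1;		/* bit within that word */

	pending_lo[level] &= ~(1u << rank);
	if (pending_lo[level] == 0)
		pending_hi &= ~(1u << level);

	return (level << 5) + rank;	/* irq = level * 32 + rank */
}

int main(void)
{
	mark_pending(7);	/* hypothetical: IRQ 7 */
	mark_pending(40);	/* hypothetical: some higher IRQ number */

	printf("%d\n", next_pending());	/* 7 */
	printf("%d\n", next_pending());	/* 40 */
	printf("%d\n", next_pending());	/* -1 */
	return 0;
}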


ipipe/core.c:

/* __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
be called with local hw interrupts disabled. */

void fastcall __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
{
struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

if (test_bit
(IPIPE_STALL_FLAG, &next_domain->cpudata[cpuid].status))
break; /* Stalled stage -- do not go further. */

if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {

if (next_domain == this_domain)
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
else {
__ipipe_switch_to(this_domain, next_domain,
cpuid);

ipipe_load_cpuid(); /* Processor might have changed. */

if (this_domain->cpudata[cpuid].
irq_pending_hi != 0
&& !test_bit(IPIPE_STALL_FLAG,
&this_domain->cpudata[cpuid].status))
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
}

break;
} else if (next_domain == this_domain)
break;

pos = next_domain->p_link.next;
}
}



kernel/ipipe-core.c

/*
* __ipipe_sync_stage() -- Flush the pending IRQs for the current
* domain (and processor). This routine flushes the interrupt log
* (see "Optimistic interrupt protection" from D. Stodolsky et al. for
* more on the deferred interrupt scheme). Every interrupt that
* occurred while the pipeline was stalled gets played. WARNING:
* callers on SMP boxen should always check for CPU migration on
* return of this routine. One can control the kind of interrupts
* which are going to be sync'ed using the syncmask
* parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
* plays virtual interrupts only. This routine must be called with hw
* interrupts off.
*/
void __ipipe_sync_stage(unsigned long syncmask)
{
unsigned long mask, submask;
struct ipcpudata *cpudata;
struct ipipe_domain *ipd;
ipipe_declare_cpuid;
int level, rank;
unsigned irq;

ipipe_load_cpuid();
ipd = ipipe_percpu_domain[cpuid];
cpudata = &ipd->cpudata[cpuid];

if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
return;
/------------
Re-entrancy guard: has __ipipe_sync_stage() already been entered on this CPU? If so, return (a small sketch of this test-and-set pattern follows the function).
-------------/

/*
* The policy here is to keep the dispatching code interrupt-free
* by stalling the current stage. If the upper domain handler
* (which we call) wants to re-enable interrupts while in a safe
* portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
* sigaction()), it will have to unstall (then stall again before
* returning to us!) the stage when it sees fit.
*/
while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
level = ffs(mask) - 1;
__clear_bit(level, &cpudata->irq_pending_hi);

while ((submask = cpudata->irq_pending_lo[level]) != 0) {

if (ipd == ipipe_root_domain &&
test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags)) {
__set_bit(level, &cpudata->irq_pending_hi);
goto done;
}

rank = ffs(submask) - 1;
irq = (level << IPIPE_IRQ_ISHIFT) + rank;

if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
continue;
}

if (--cpudata->irq_counters[irq].pending_hits == 0) {
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
ipipe_mark_irq_delivery(ipd,irq,cpuid);
}

__set_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_stall(ipd, cpuid);

if (ipd == ipipe_root_domain) {
/*
* Note: the I-pipe implements a
* threaded interrupt model on this
* arch for Linux external IRQs. The
* interrupt handler we call here only
* wakes up the associated IRQ thread.
*/
if (ipipe_virtual_irq_p(irq)) {

/----------------
Is this a virtual irq?

-----------------/
/* No irqtail here; virtual interrupts have
no effect on IPEND so there is no need for
processing deferral. */
local_irq_enable_hw();
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
local_irq_disable_hw();
} else
/* No need to run the irqtail here either; we are not
preemptable by hw IRQs, so non-Linux IRQs cannot
stack over the short thread wakeup code. Which in turn
means that no irqtail condition could be pending
for domains above Linux in the pipeline. */
((void (*)(unsigned, struct pt_regs *))
ipd->irqs[irq].handler) (irq, __ipipe_tick_regs + cpuid);
} else {
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
/* Attempt to exit the outer interrupt
* level before starting the deferred
* IRQ processing. */
__ipipe_run_irqtail();
/-----------------
See __ipipe_run_irqtail() below.
-----------------/


__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}
#ifdef CONFIG_SMP
{
int _cpuid = ipipe_processor_id();

if (_cpuid != cpuid) { /* Handle CPU migration. */
/* We expect any domain to clear the SYNC bit each
time it switches in a new task, so that preemptions
and/or CPU migrations (in the SMP case) over the
ISR do not lock out the log syncer for some
indefinite amount of time. In the Linux case,
schedule() handles this (see kernel/sched.c). For
this reason, we don't bother clearing it here for
the source CPU in the migration handling case,
since it must have scheduled another task in by
now. */
cpuid = _cpuid;
cpudata = &ipd->cpudata[cpuid];
__set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
}
}
#endif /* CONFIG_SMP */
__clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_unstall(ipd, cpuid);
}
}

done:
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}
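The SYNC-flag guard noted at the top of this function is a plain test-and-set re-entrancy lock. A minimal sketch of the same pattern, using C11 atomics instead of the kernel's __test_and_set_bit() (the depth argument exists only for the demo output):

/* Sketch of the re-entrancy guard: the first caller sets the flag and
 * does the work; any nested call sees the flag already set and returns
 * immediately; the flag is cleared when the first caller is done. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sync_flag = ATOMIC_FLAG_INIT;

static void sync_stage(int depth)
{
	if (atomic_flag_test_and_set(&sync_flag)) {
		/* like __test_and_set_bit(IPIPE_SYNC_FLAG, ...) returning non-zero */
		printf("depth %d: already syncing, return\n", depth);
		return;
	}

	printf("depth %d: flushing the pending log\n", depth);
	if (depth == 0)
		sync_stage(1);	/* simulate a nested invocation */

	atomic_flag_clear(&sync_flag);	/* like the final __clear_bit(IPIPE_SYNC_FLAG, ...) */
}

int main(void)
{
	sync_stage(0);
	return 0;
}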


static inline void __ipipe_run_irqtail(void)
{
asmlinkage void __ipipe_call_irqtail(void);
unsigned long pending;

__builtin_bfin_csync();

pending = *pIPEND;
if (pending & 0x8000) {
pending &= ~0x8010;
if (pending && (pending & (pending - 1)) == 0)
__ipipe_call_irqtail();
/--------------
See ___ipipe_call_irqtail below.
---------------/
}
}


ENTRY(___ipipe_call_irqtail)

r0.l = 1f;
r0.h = 1f;
reti = r0;
rti
/-------
reti is pointed at label 1: and rti executes: we return from the interrupt and continue at 1:, now out of the original interrupt level.
---------/
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
p0.l = ___ipipe_irq_tail_hook;
p0.h = ___ipipe_irq_tail_hook;
p0 = [p0] ;
sp += -12 ;
call (p0) ;
sp += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];

/------------
Call ipipe_irq_tail_hook
------------/

[--sp] = reti;
reti = [sp++]; /* IRQs are off. */

/-----------
Push and pop reti: reloading reti disables interrupt nesting, so IRQs stay off here.
------------/

r0.h = 3f;
r0.l = 3f;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync;
r0 = 0x401f;
sti r0;
/----------------------------------
Enable interrupts again with IMASK = 0x401f, i.e. only IVG14 unmasked among the peripheral levels.
----------------------------------/

raise 14;
[--sp] = reti; /* IRQs on. */
2:
jump 2b /* Likely paranoid. */
3:
sp += 4; /* Discard saved RETI */
r0.h = _evt14_softirq;
r0.l = _evt14_softirq;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync
p0.l = _irq_flags;
p0.h = _irq_flags;
r0 = [p0];
sti r0
rts;
