/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
/--------------------
irq: starts from 6.
This function can be called from:
ipipe_grab_irq()    - HW path, regs points to the trapped register frame.
ipipe_trigger_irq() - SW path; a software-triggered IRQ has regs == NULL,
                      so there is no hardware interrupt to acknowledge.
---------------------/
	struct ipipe_domain *this_domain;
	struct list_head *head, *pos;
	int m_ack, s_ack, s = -1;
	ipipe_declare_cpuid;

	m_ack = (regs == NULL);	/* Software-triggered IRQs do not need
				 * any ack. */
/--------------------
A software-triggered IRQ has no regs.
See:
ipipe-core.c: ipipe_trigger_irq(): __ipipe_handle_irq(irq, NULL);
---------------------/
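/--------------------
For reference, a minimal sketch of the software-trigger path (hedged:
only the __ipipe_handle_irq(irq, NULL) call is taken from the source
above; the argument checking and the exact hw-masking helpers used by
the real ipipe_trigger_irq() are version-dependent):

int ipipe_trigger_irq(unsigned irq)
{
	unsigned long flags;

	local_irq_save_hw(flags);	/* hw interrupts masked on entry */
	__ipipe_handle_irq(irq, NULL);	/* regs == NULL => m_ack starts true,
					 * so no PIC acknowledge is attempted */
	local_irq_restore_hw(flags);

	return 1;
}
---------------------/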
	ipipe_load_cpuid();

	this_domain = ipipe_percpu_domain[cpuid];
/-------------------------
struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
	{[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };
What does "ipipe_percpu_domain[cpuid]" stand for?
The domain currently running on "this" CPU (only meaningful on SMP
systems). cpuid is always 0 on Blackfin, which is uniprocessor.
--------------------------/
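/-------------------------
For illustration, the cpuid helpers on a uniprocessor build are trivial;
a rough sketch of what they amount to (hedged: these are not the exact
I-pipe definitions, which vary by version, but they show why cpuid is
constant here):

#define ipipe_declare_cpuid	int cpuid
#define ipipe_load_cpuid()	do { cpuid = 0; } while (0)

On an SMP build ipipe_load_cpuid() would read the running CPU's id
instead.
--------------------------/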
	s_ack = m_ack;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else
		head = __ipipe_pipeline.next;
/----------------------------
__ipipe_pipeline: the global list of domains, kept in priority order.
If the IRQ is marked STICKY for this_domain (the domain that was running
when the interrupt arrived), the walk starts at this_domain; otherwise it
starts at the highest-priority domain, so a higher-priority domain stacked
above the root domain always sees the IRQ first.
-----------------------------/
	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		struct ipipe_domain *next_domain =
		    list_entry(pos, struct ipipe_domain, p_link);
/--------------------------------
Walk the pipeline starting from "head", one domain per iteration.
---------------------------------/
		/*
		 * For each domain handling the incoming IRQ, mark it as
		 * pending in its log.
		 */
		if (test_bit(IPIPE_HANDLE_FLAG,
			     &next_domain->irqs[irq].control)) {
			/*
			 * Domains that handle this IRQ are polled for
			 * acknowledging it by decreasing priority order. The
			 * interrupt must be made pending _first_ in the
			 * domain's status flags before the PIC is unlocked.
			 */
			next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
			next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
			__ipipe_set_irq_bit(next_domain, cpuid, irq);
/--------------------
struct ipcpudata {
	unsigned long status;
	unsigned long irq_pending_hi;
	unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
	struct ipirqcnt {
		unsigned long pending_hits;
		unsigned long total_hits;
	} irq_counters[IPIPE_NR_IRQS];
} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];

#define __ipipe_set_irq_bit(ipd, cpuid, irq) \
do { \
	if (!test_bit(IPIPE_LOCK_FLAG, &(ipd)->irqs[irq].control)) { \
		__set_bit(irq & IPIPE_IRQ_IMASK, \
			  &(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
		__set_bit(irq >> IPIPE_IRQ_ISHIFT, \
			  &(ipd)->cpudata[cpuid].irq_pending_hi); \
	} \
} while (0)

The pending log is a two-level bitmap: with a = irq >> IPIPE_IRQ_ISHIFT
(irq / 32) and b = irq & IPIPE_IRQ_IMASK (irq % 32), bit b of
irq_pending_lo[a] and bit a of irq_pending_hi are both set, so the IRQ
number can later be recovered as irq = a * 32 + b.
--------------------/
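/--------------------
To make the encoding concrete, here is a small standalone sketch of the
same two-level bitmap scheme (illustrative only: plain userspace C with
32 words of 32 bits, using ffs() instead of the kernel's __set_bit()
helpers; the names pending_hi/pending_lo just mirror the fields above):

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int pending_hi;		/* bit a set => pending_lo[a] non-empty */
static unsigned int pending_lo[32];	/* bit b of word a => IRQ a*32+b pending */

static void set_pending(unsigned irq)
{
	pending_lo[irq >> 5] |= 1U << (irq & 31);
	pending_hi |= 1U << (irq >> 5);
}

static int next_pending(void)
{
	int a, b;

	if (!pending_hi)
		return -1;
	a = ffs(pending_hi) - 1;	/* first non-empty word */
	b = ffs(pending_lo[a]) - 1;	/* first pending bit in that word */
	pending_lo[a] &= ~(1U << b);
	if (!pending_lo[a])
		pending_hi &= ~(1U << a);
	return a * 32 + b;		/* irq = a * 32 + b */
}

int main(void)
{
	set_pending(6);
	set_pending(38);
	printf("%d %d %d\n", next_pending(), next_pending(), next_pending());
	return 0;	/* prints: 6 38 -1 */
}
--------------------/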
			ipipe_mark_irq_receipt(next_domain, irq, cpuid);
/---------
ipipe_mark_irq_receipt() is only meaningful when CONFIG_IPIPE_STATS is
enabled; otherwise it is a no-op.
-------------/
			/*
			 * Always get the first master acknowledge available.
			 * Once we've got it, allow slave acknowledge
			 * handlers to run (until one of them stops us).
			 */
			if (next_domain->irqs[irq].acknowledge != NULL) {
				if (!m_ack)
					m_ack = next_domain->irqs[irq].acknowledge(irq);
				else if (test_bit
					 (IPIPE_SHARED_FLAG,
					  &next_domain->irqs[irq].control) && !s_ack)
					s_ack = next_domain->irqs[irq].acknowledge(irq);
			}
		}
/-----------------------------------------------------------------------------------
What are m_ack and s_ack for?
They record whether the hardware acknowledge has already been issued, so
the PIC ack routine is called only once: m_ack for the first "master" ack,
s_ack for the additional "slave" ack allowed on shared IRQs. The ack mainly
masks the "irq" at the controller while it is being handled.
irqs[irq].acknowledge(irq) -> __ipipe_ack_irq() -> the irqchip .ack hook,
see below (ints-priority-sc.c):
static struct irqchip bf533_core_irqchip = {
#ifdef CONFIG_IPIPE
	.ack = bf533_core_mask_irq,
#else /* !CONFIG_IPIPE */
	.ack = ack_noop,
------------------------------------------------------------------------------------/
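/-----------------------------------------------------------------------------------
A rough sketch of the forwarding described above (hedged: the descriptor
type and field names below are assumptions that vary with the kernel
version; what matters is that __ipipe_ack_irq() ends up in the irqchip's
.ack hook, i.e. bf533_core_mask_irq() for the BF533 core interrupts, so
the IRQ stays masked at the controller until it has been handled):

int __ipipe_ack_irq(unsigned irq)
{
	/* Look up this IRQ's descriptor (type/field names assumed here). */
	struct irqdesc *desc = irq_desc + irq;

	desc->handler->ack(irq);	/* -> bf533_core_mask_irq() on BF533 */

	return 1;	/* tell __ipipe_handle_irq() the ack has been issued */
}
------------------------------------------------------------------------------------/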