Thursday, May 29, 2008

Blackfin IRQ handler

/* Interrupt flow:
EVT table -->
INTERRUPT_ENTRY(N) -->
__common_int_entry -->
do_irq() -->
asm_do_IRQ() -->
generic_handle_irq() -->
irq_desc[irq]->handle_irq()
--> {
handle_simple_irq();
handle_level_irq();
...
} --> handle_IRQ_event() -->
action->handler() (the real handler, registered via request_irq())

*/
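The chain terminates in action->handler(), i.e. whatever a driver registered with request_irq(). As a minimal driver-side sketch (the device structure, helper names and IRQ usage here are invented purely for illustration):

#include <linux/interrupt.h>

/* Hypothetical device cookie handed back to the handler as dev_id. */
struct my_dev {
        void __iomem *regs;
};

/* The "real handler" at the end of the flow above; it runs out of
 * handle_IRQ_event() and reports whether the interrupt was handled. */
static irqreturn_t my_handler(int irq, void *dev_id)
{
        struct my_dev *dev = dev_id;

        /* ... acknowledge the device (e.g. via dev->regs) and do the work ... */
        return IRQ_HANDLED;
}

static int my_setup(struct my_dev *dev, unsigned int irq)
{
        /* request_irq() appends an irqaction to irq_desc[irq].action and
         * unmasks the line through the irq_chip; from then on the flow
         * above ends in my_handler(). */
        return request_irq(irq, my_handler, 0, "my_dev", dev);
}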

/* ============Adam: init the EVT table ========================*/
mach-common/ints-priority.c:
-----------------------------
void __init init_exception_vectors(void)
{
SSYNC();

/* cannot program in software:
* evt0 - emulation (jtag)
* evt1 - reset
*/
bfin_write_EVT2(evt_nmi);
bfin_write_EVT3(trap);
bfin_write_EVT5(evt_ivhw);
bfin_write_EVT6(evt_timer);
bfin_write_EVT7(evt_evt7);
bfin_write_EVT8(evt_evt8);
bfin_write_EVT9(evt_evt9);
bfin_write_EVT10(evt_evt10);
bfin_write_EVT11(evt_evt11);
bfin_write_EVT12(evt_evt12);
bfin_write_EVT13(evt_evt13);
bfin_write_EVT14(evt14_softirq);
bfin_write_EVT15(evt_system_call);
CSYNC();
}

mach-common/interrupt.S
--------------------------------------------------------------------------------
/* Interrupt routine for evt2 (NMI).
* We don't actually use this, so just return.
* For inner circle type details, please see:
* http://docs.blackfin.uclinux.org/doku.php?id=linux:nmi
*/
ENTRY(_evt_nmi)
.weak _evt_nmi
rtn;
ENDPROC(_evt_nmi)

/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
INTERRUPT_ENTRY(EVT_IVG13_P)


include/asm-blackfin/entry.h:
----------------------------------------------------------------------------
/*
* NOTE! The single-stepping code assumes that all interrupt handlers
* start by saving SYSCFG on the stack with their first instruction.
*/

/* This one is used for exceptions, emulation, and NMI. It doesn't push
RETI and doesn't do cli. */
#define SAVE_ALL_SYS save_context_no_interrupts
/* This is used for all normal interrupts. It saves a minimum of registers
to the stack, loads the IRQ number, and jumps to common code. */
#define INTERRUPT_ENTRY(N) \
[--sp] = SYSCFG; \
\
[--sp] = P0; /*orig_p0*/ \
[--sp] = R0; /*orig_r0*/ \
[--sp] = (R7:0,P5:0); \
R0 = (N); \
jump __common_int_entry;

/* For timer interrupts, we need to save IPEND, since the user_mode
macro accesses it to determine where to account time. */
#define TIMER_INTERRUPT_ENTRY(N) \
[--sp] = SYSCFG; \
\
[--sp] = P0; /*orig_p0*/ \
[--sp] = R0; /*orig_r0*/ \
[--sp] = (R7:0,P5:0); \
p0.l = lo(IPEND); \
p0.h = hi(IPEND); \
r1 = [p0]; \
R0 = (N); \
jump __common_int_entry;



mach-common/interrupt.S
--------------------------------------------------------------------------------
/* Common interrupt entry code. First we do CLI, then push
* RETI, to keep interrupts disabled, but to allow this state to be changed
* by local_bh_enable.
* R0 contains the interrupt number, while R1 may contain the value of IPEND,
* or garbage if IPEND won't be needed by the ISR. */
__common_int_entry:
[--sp] = fp;
[--sp] = usp;

[--sp] = i0;
[--sp] = i1;
[--sp] = i2;
[--sp] = i3;

[--sp] = m0;
[--sp] = m1;
[--sp] = m2;
[--sp] = m3;

[--sp] = l0;
[--sp] = l1;
[--sp] = l2;
[--sp] = l3;

[--sp] = b0;
[--sp] = b1;
[--sp] = b2;
[--sp] = b3;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = a1.x;
[--sp] = a1.w;

[--sp] = LC0;
[--sp] = LC1;
[--sp] = LT0;
[--sp] = LT1;
[--sp] = LB0;
[--sp] = LB1;

[--sp] = ASTAT;

[--sp] = r0; /* Skip reserved */
[--sp] = RETS;
r2 = RETI;
[--sp] = r2;
[--sp] = RETX;
[--sp] = RETN;
[--sp] = RETE;
[--sp] = SEQSTAT;
[--sp] = r1; /* IPEND - R1 may or may not be set up before jumping here. */

/* Switch to other method of keeping interrupts disabled. */
#ifdef CONFIG_DEBUG_HWERR
r1 = 0x3f;
sti r1;
#else
cli r1;
#endif
[--sp] = RETI; /* orig_pc */
/* Clear all L registers. */
r1 = 0 (x);
l0 = r1;
l1 = r1;
l2 = r1;
l3 = r1;
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif

#if ANOMALY_05000283 || ANOMALY_05000315
cc = r7 == r7;
p5.h = HI(CHIPID);
p5.l = LO(CHIPID);
if cc jump 1f;
r7.l = W[p5];
1:
#endif
r1 = sp;
SP += -12;
call _do_irq;
SP += 12;
call _return_from_int;
.Lcommon_restore_context:
RESTORE_CONTEXT
rti;

/* Adam: ====================== do_irq() =========================*/
mach-common/ints-priority.c
----------------------------------------------------------------------
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
void do_irq(int vec, struct pt_regs *fp)
{
if (vec == EVT_IVTMR_P) {
vec = IRQ_CORETMR;
} else {
struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
unsigned long sic_status[3];

sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
#ifdef CONFIG_BF54x
sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif
for (;; ivg++) {
if (ivg >= ivg_stop) {
atomic_inc(&num_spurious);
return;
}
if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
break;
}
#else
unsigned long sic_status;

sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

for (;; ivg++) {
if (ivg >= ivg_stop) {
atomic_inc(&num_spurious);
return;
} else if (sic_status & ivg->isrflag)
break;
}
#endif
vec = ivg->irqno;
}
asm_do_IRQ(vec, fp);

#ifdef CONFIG_KGDB
kgdb_process_breakpoint();
#endif
}
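To pick the right system IRQ, do_irq() ANDs the SIC status with the SIC mask and then scans only the slice of a lookup table that was built for the IVG level that fired. A simplified sketch of those tables as ints-priority.c of this era declares them (the irqno/isrflag and ifirst/istop fields are exactly the ones dereferenced above; struct names, array bounds and comments should be treated as approximate):

/* One entry per peripheral interrupt source. */
struct ivgx {
        unsigned int irqno;     /* system IRQ number, as used by request_irq() */
        unsigned int isrflag;   /* corresponding bit in SIC_ISR */
} ivg_table[NR_PERI_INTS];

/* One slice per core interrupt level IVG7..IVG13: the range of ivg_table
 * entries that program_IAR()/search_IAR() routed to that level at boot. */
struct ivg_slice {
        struct ivgx *ifirst;
        struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];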

arch/blackfin/kernel/irqchip.c
-----------------------------------------------------------------------------------
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs;
struct irq_desc *desc = irq_desc + irq;
unsigned short pending, other_ints;

old_regs = set_irq_regs(regs);

/*
* Some hardware gives randomly wrong interrupts. Rather
* than crashing, do something sensible.
*/
if (irq >= NR_IRQS)
desc = &bad_irq_desc;

irq_enter();

generic_handle_irq(irq);

/* If we're the only interrupt running (ignoring IRQ15 which is for
syscalls), lower our priority to IRQ14 so that softirqs run at
that level. If there's another, lower-level interrupt, irq_exit
will defer softirqs to that. */
CSYNC();
pending = bfin_read_IPEND() & ~0x8000;
other_ints = pending & (pending - 1);
if (other_ints == 0)
lower_to_irq14();
irq_exit();

set_irq_regs(old_regs);
}

include/linux/irq.h:
----------------------------------------------------------------------------------------
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
* irqchip-style controller then we call the ->handle_irq() handler,
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
*/
static inline void generic_handle_irq(unsigned int irq)
{
struct irq_desc *desc = irq_desc + irq;

#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
desc->handle_irq(irq, desc);
#else
if (likely(desc->handle_irq))
desc->handle_irq(irq, desc);
else
__do_IRQ(irq);
#endif
}

/* Adam: =============== Global struct irq_desc irq_desc[NR_IRQS], struct irqchip ======== */

kernel/irq/handle.c
-----------------------------------------------------------------------
/*
* Linux has a controller-independent interrupt architecture.
* Every controller has a 'controller-template', that is used
* by the main code to do the right thing. Each driver-visible
* interrupt source is transparently wired to the appropriate
* controller. Thus drivers need not be aware of the
* interrupt-controller.
*
* The code is designed to be easily extended with new/different
* interrupt controllers, without having to do assembly magic or
* having to touch the generic code.
*
* Controller mappings for all interrupt sources:
*/
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS-1] = {
.status = IRQ_DISABLED,
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
.affinity = CPU_MASK_ALL
#endif
}
};

/**
* struct irq_desc - interrupt descriptor
*
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @chip: low level interrupt hardware access
* @msi_desc: MSI descriptor
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
* @action: the irq action chain
* @status: status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @last_unhandled: aging timer for unhandled count
* @lock: locking for SMP
* @affinity: IRQ affinity on SMP
* @cpu: cpu index useful for balancing
* @pending_mask: pending rebalanced interrupts
* @dir: /proc/irq/ procfs entry
* @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
struct msi_desc *msi_desc;
void *handler_data;
void *chip_data;
struct irqaction *action; /* IRQ action list */
unsigned int status; /* IRQ status */

unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
unsigned int irqs_unhandled;
unsigned long last_unhandled; /* Aging timer for unhandled count */
spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_t affinity;
unsigned int cpu;
#endif
#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
cpumask_t pending_mask;
#endif
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
const char *name;
} ____cacheline_internodealigned_in_smp;


arch/blackfin/mach-common/ints-priority.c:
------------------------------------------------------------
static struct irq_chip bfin_core_irqchip = {
.ack = bfin_ack_noop,
.mask = bfin_core_mask_irq,
.unmask = bfin_core_unmask_irq,
};

static struct irq_chip bfin_internal_irqchip = {
.ack = bfin_ack_noop,
.mask = bfin_internal_mask_irq,
.unmask = bfin_internal_unmask_irq,
.mask_ack = bfin_internal_mask_irq,
.disable = bfin_internal_mask_irq,
.enable = bfin_internal_unmask_irq,
#ifdef CONFIG_PM
.set_wake = bfin_internal_set_wake,
#endif
};

static struct irq_chip bfin_generic_error_irqchip = {
.ack = bfin_ack_noop,
.mask_ack = bfin_generic_error_mask_irq,
.mask = bfin_generic_error_mask_irq,
.unmask = bfin_generic_error_unmask_irq,
};


static struct irq_chip bfin_gpio_irqchip = {
.ack = bfin_gpio_ack_irq,
.mask = bfin_gpio_mask_irq,
.mask_ack = bfin_gpio_mask_ack_irq,
.unmask = bfin_gpio_unmask_irq,
.set_type = bfin_gpio_irq_type,
.startup = bfin_gpio_irq_startup,
.shutdown = bfin_gpio_irq_shutdown,
#ifdef CONFIG_PM
.set_wake = bfin_gpio_set_wake,
#endif
};
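These irq_chip structures only supply the low-level register operations. init_arch_irq() below attaches them to the interrupt descriptors with set_irq_chip() and set_irq_chip_and_handler(); ignoring locking and fallback checks, that call boils down to something like the following sketch (see include/linux/irq.h and kernel/irq/chip.c for the real implementation):

#include <linux/irq.h>

/* Simplified sketch only: the real code takes the descriptor lock and
 * falls back to no_irq_chip / handle_bad_irq when given NULL. */
static void sketch_set_irq_chip_and_handler(unsigned int irq,
                                            struct irq_chip *chip,
                                            irq_flow_handler_t handle)
{
        struct irq_desc *desc = irq_desc + irq;

        desc->chip = chip;              /* mask/unmask/ack callbacks */
        desc->handle_irq = handle;      /* what generic_handle_irq() calls */
}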


/* Adam: =============== How everything gets initialized ======== */

arch/blackfin/kernel/irqchip.c:
-------------------------------------------------------
void __init init_IRQ(void)
{
struct irq_desc *desc;
int irq;

spin_lock_init(&irq_controller_lock);
for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
*desc = bad_irq_desc;
}

init_arch_irq();

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
/* Now that evt_ivhw is set up, turn this on */
trace_buff_offset = 0;
bfin_write_TBUFCTL(BFIN_TRACE_ON);
printk(KERN_INFO "Hardware Trace expanded to %ik\n",
1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN);
#endif
}

arch/blackfin/mach-common/ints-priority.c
-------------------------------------------------------------
/*
* This function should be called during kernel startup to initialize
* the BFin IRQ handling routines.
*/
int __init init_arch_irq(void)
{
int irq;
unsigned long ilat = 0;
/* Disable all the peripheral intrs - page 4-29 HW Ref manual */
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
# ifdef CONFIG_BF54x
bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
#else
bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif

local_irq_disable();

#ifdef CONFIG_BF54x
# ifdef CONFIG_PINTx_REASSIGN
pint[0]->assign = CONFIG_PINT0_ASSIGN;
pint[1]->assign = CONFIG_PINT1_ASSIGN;
pint[2]->assign = CONFIG_PINT2_ASSIGN;
pint[3]->assign = CONFIG_PINT3_ASSIGN;
# endif
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
init_pint_lut();
#endif

for (irq = 0; irq <= SYS_IRQS; irq++) {
if (irq <= IRQ_CORETMR)
set_irq_chip(irq, &bfin_core_irqchip);
else
set_irq_chip(irq, &bfin_internal_irqchip);

switch (irq) {
#if defined(CONFIG_BF53x)
case IRQ_PROG_INTA:
# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
case IRQ_MAC_RX:
# endif
#elif defined(CONFIG_BF54x)
case IRQ_PINT0:
case IRQ_PINT1:
case IRQ_PINT2:
case IRQ_PINT3:
#elif defined(CONFIG_BF52x)
case IRQ_PORTF_INTA:
case IRQ_PORTG_INTA:
case IRQ_PORTH_INTA:
#elif defined(CONFIG_BF561)
case IRQ_PROG0_INTA:
case IRQ_PROG1_INTA:
case IRQ_PROG2_INTA:
#endif
set_irq_chained_handler(irq,
bfin_demux_gpio_irq);
break;
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
case IRQ_GENERIC_ERROR:
set_irq_handler(irq, bfin_demux_error_irq);

break;
#endif
default:
set_irq_handler(irq, handle_simple_irq);
break;
}
}

#ifdef BF537_GENERIC_ERROR_INT_DEMUX
for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
handle_level_irq);
#endif

/* if configured as edge, then will be changed to do_edge_IRQ */
for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++)
set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
handle_level_irq);


bfin_write_IMASK(0);
CSYNC();
ilat = bfin_read_ILAT();
CSYNC();
bfin_write_ILAT(ilat);
CSYNC();

printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
/* IMASK=xxx is equivalent to STI xx or irq_flags=xx,
* local_irq_enable()
*/
program_IAR();
/* Therefore it's better to setup IARs before interrupts enabled */
search_IAR();

/* Enable interrupts IVG7-15 */
irq_flags = irq_flags | IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
bfin_write_SIC_IWR0(IWR_ENABLE_ALL);
bfin_write_SIC_IWR1(IWR_ENABLE_ALL);
# ifdef CONFIG_BF54x
bfin_write_SIC_IWR2(IWR_ENABLE_ALL);
# endif
#else
bfin_write_SIC_IWR(IWR_ENABLE_ALL);
#endif



return 0;
}
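All GPIO IRQs start out on handle_level_irq here; as the "configured as edge" comment above notes, asking for an edge trigger later goes through bfin_gpio_irqchip.set_type (bfin_gpio_irq_type), which reprograms the pin and swaps in the edge flow handler. A driver-side sketch (the GPIO IRQ number and names are invented for illustration):

#include <linux/interrupt.h>

static irqreturn_t button_isr(int irq, void *dev_id)
{
        /* ... debounce, report the key press, etc. ... */
        return IRQ_HANDLED;
}

static int button_setup(unsigned int gpio_irq)
{
        /* gpio_irq would be GPIO_IRQ_BASE + the pin number. Passing an
         * IRQF_TRIGGER_* flag makes setup_irq() call the chip's
         * set_type hook before the line is enabled. */
        return request_irq(gpio_irq, button_isr, IRQF_TRIGGER_RISING,
                           "button", NULL);
}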

kernel/irq/chip.c:
------------------------------------------------------
/**
* handle_simple_irq - Simple and software-decoded IRQs.
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Simple interrupts are either sent from a demultiplexing interrupt
* handler or come from hardware, where no interrupt hardware control
* is necessary.
*
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
void fastcall
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
struct irqaction *action;
irqreturn_t action_ret;
const unsigned int cpu = smp_processor_id();

spin_lock(&desc->lock);

if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
kstat_cpu(cpu).irqs[irq]++;

action = desc->action;
if (unlikely(!action || (desc->status & IRQ_DISABLED)))
goto out_unlock;

desc->status |= IRQ_INPROGRESS;
spin_unlock(&desc->lock);

action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);

spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
spin_unlock(&desc->lock);
}

/**
* handle_level_irq - Level type irq handler
* @irq: the interrupt number
* @desc: the interrupt description structure for this irq
*
* Level type interrupts are active as long as the hardware line has
* the active level. This may require to mask the interrupt and unmask
* it after the associated handler has acknowledged the device, so the
* interrupt line is back to inactive.
*/
void fastcall
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int cpu = smp_processor_id();
struct irqaction *action;
irqreturn_t action_ret;

spin_lock(&desc->lock);
mask_ack_irq(desc, irq);

if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
kstat_cpu(cpu).irqs[irq]++;

/*
* If its disabled or no action available
* keep it masked and get out of here
*/
action = desc->action;
if (unlikely(!action || (desc->status & IRQ_DISABLED)))
goto out_unlock;

desc->status |= IRQ_INPROGRESS;
spin_unlock(&desc->lock);

action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);

spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
spin_unlock(&desc->lock);
}

kernel/irq/handle.c:
------------------------------------------------------
/**
* handle_IRQ_event - irq action chain handler
* @irq: the interrupt number
* @action: the interrupt action chain for this irq
*
* Handles the action chain of an irq event
*/
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;

handle_dynamic_tick(action);

if (!(action->flags & IRQF_DISABLED))
local_irq_enable_in_hardirq();

do {
ret = action->handler(irq, action->dev_id);
if (ret == IRQ_HANDLED)
status |= action->flags;
retval |= ret;
action = action->next;
} while (action);

if (status & IRQF_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();

return retval;
}
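handle_IRQ_event() walks the whole irqaction chain, so a line shared by several devices gets every registered handler called in turn; each handler must check its own hardware and return IRQ_NONE when the interrupt is not its device's, so that retval ends up IRQ_HANDLED only if somebody claimed it. A sketch of two hypothetical devices sharing one line (structures and status helpers are stand-ins):

#include <linux/interrupt.h>

struct dev_a { void __iomem *regs; };
struct dev_b { void __iomem *regs; };

/* Stand-ins for reading each device's interrupt status register. */
static int dev_a_irq_pending(struct dev_a *a) { return 1; }
static int dev_b_irq_pending(struct dev_b *b) { return 1; }

static irqreturn_t dev_a_isr(int irq, void *dev_id)
{
        struct dev_a *a = dev_id;

        if (!dev_a_irq_pending(a))
                return IRQ_NONE;        /* not ours: the loop tries the next action */
        /* ... service device A ... */
        return IRQ_HANDLED;
}

static irqreturn_t dev_b_isr(int irq, void *dev_id)
{
        struct dev_b *b = dev_id;

        if (!dev_b_irq_pending(b))
                return IRQ_NONE;
        /* ... service device B ... */
        return IRQ_HANDLED;
}

/* Both registrations use IRQF_SHARED, so irq_desc[irq].action becomes a
 * two-entry chain that handle_IRQ_event() walks on every interrupt. */
static int shared_setup(unsigned int irq, struct dev_a *a, struct dev_b *b)
{
        int ret = request_irq(irq, dev_a_isr, IRQF_SHARED, "dev_a", a);

        if (ret)
                return ret;
        ret = request_irq(irq, dev_b_isr, IRQF_SHARED, "dev_b", b);
        if (ret)
                free_irq(irq, a);
        return ret;
}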

 
