Wednesday, March 11, 2009

bfin generic irq handling

linux/Documentation/DocBook/genericirq/ (make htmldocs)

LWN: A new generic IRQ layer -


"struct irq_desc" represents an IRQ. "struct irq_chip" - chip level handling.
Arch code needs to define its "irq_chip" structures. See below.

All the chip level initialization is done in init_arch_irq(), it calls set_irq_handler(), set_irq_chip() to initialize each "irq desc" structures.
This sets up the generic irq framework.

A driver needs to call request_irq() to register an irq handler.

When an irq happens, the arch code finally calls into irq_desc->handler() (this handler is initialized in init_arch_irq()); then a list of irq_action is walked through,
the driver's registered handler is finally called.

irq chips:
static struct irq_chip bfin_core_irqchip = {
.name = "CORE",
.ack = bfin_ack_noop,
.mask = bfin_core_mask_irq,
.unmask = bfin_core_unmask_irq,

static struct irq_chip bfin_internal_irqchip = {
.name = "INTN",
.ack = bfin_ack_noop,
.mask = bfin_internal_mask_irq,
.unmask = bfin_internal_unmask_irq,
.mask_ack = bfin_internal_mask_irq,
.disable = bfin_internal_mask_irq,
.enable = bfin_internal_unmask_irq,
#ifdef CONFIG_PM
.set_wake = bfin_internal_set_wake,
static struct irq_chip bfin_generic_error_irqchip = {
.name = "ERROR",
.ack = bfin_ack_noop,
.mask_ack = bfin_generic_error_mask_irq,
.mask = bfin_generic_error_mask_irq,
.unmask = bfin_generic_error_unmask_irq,

static struct irq_chip bfin_gpio_irqchip = {
.name = "GPIO",
.ack = bfin_gpio_ack_irq,
.mask = bfin_gpio_mask_irq,
.mask_ack = bfin_gpio_mask_ack_irq,
.unmask = bfin_gpio_unmask_irq,
.disable = bfin_gpio_mask_irq,
.enable = bfin_gpio_unmask_irq,
.set_type = bfin_gpio_irq_type,
.startup = bfin_gpio_irq_startup,
.shutdown = bfin_gpio_irq_shutdown,
#ifdef CONFIG_PM
.set_wake = bfin_gpio_set_wake,

struct irq_desc:
Each interrupt is described by an interrupt descriptor structure irq_desc

* struct irq_desc - interrupt descriptor
* @irq: interrupt number for this descriptor
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @chip: low level interrupt hardware access
* @msi_desc: MSI descriptor
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
* @action: the irq action chain
* @status: status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @last_unhandled: aging timer for unhandled count
* @lock: locking for SMP
* @affinity: IRQ affinity on SMP
* @cpu: cpu index useful for balancing
* @pending_mask: pending rebalanced interrupts
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
struct irq_desc {
unsigned int irq;
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
struct msi_desc *msi_desc;
void *handler_data;
void *chip_data;
struct irqaction *action; /* IRQ action list */
unsigned int status; /* IRQ status */

unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
unsigned int irqs_unhandled;
unsigned long last_unhandled; /* Aging timer for unhandled count */
spinlock_t lock;
cpumask_t affinity;
unsigned int cpu;
cpumask_t pending_mask;
struct proc_dir_entry *dir;
const char *name;
} ____cacheline_internodealigned_in_smp;

struct irq_chip:
* struct irq_chip - hardware interrupt chip descriptor
* @name: name for /proc/interrupts
* @startup: start up the interrupt (defaults to ->enable if NULL)
* @shutdown: shut down the interrupt (defaults to ->disable if NULL)
* @enable: enable the interrupt (defaults to chip->unmask if NULL)
* @disable: disable the interrupt (defaults to chip->mask if NULL)
* @ack: start of a new interrupt
* @mask: mask an interrupt source
* @mask_ack: ack and mask an interrupt source
* @unmask: unmask an interrupt source
* @eoi: end of interrupt - chip level
* @end: end of interrupt - flow level
* @set_affinity: set the CPU affinity on SMP machines
* @retrigger: resend an IRQ to the CPU
* @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @set_wake: enable/disable power-management wake-on of an IRQ
* @release: release function solely used by UML
* @typename: obsoleted by name, kept as migration helper
struct irq_chip {
const char *name;
unsigned int (*startup)(unsigned int irq);
void (*shutdown)(unsigned int irq);
void (*enable)(unsigned int irq);
void (*disable)(unsigned int irq);

void (*ack)(unsigned int irq);

irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;

if (!(action->flags & IRQF_DISABLED))

do {
ret = action->handler(irq, action->dev_id);
if (ret == IRQ_HANDLED)
status |= action->flags;
retval |= ret;
action = action->next;
} while (action);

if (status & IRQF_SAMPLE_RANDOM)

return retval;
void (*mask)(unsigned int irq);
void (*mask_ack)(unsigned int irq);
void (*unmask)(unsigned int irq);
void (*eoi)(unsigned int irq);

void (*end)(unsigned int irq);
void (*set_affinity)(unsigned int irq, cpumask_t dest);
int (*retrigger)(unsigned int irq);
int (*set_type)(unsigned int irq, unsigned int flow_type);
int (*set_wake)(unsigned int irq, unsigned int on);

/* Currently used only by UML, might disappear one day.*/
void (*release)(unsigned int irq, void *dev_id);
* For compatibility, ->typename is copied into ->name.
* Will disappear.
const char *typename;

irq flow
Low level arch code will finally call
"irq_desc[irq]-> handler()"

asm_do_IRQ() -->
generic_handle_irq() -->
desc->handle_irq() --> (Or __do_IRQ() if desc->handle_irq == NULL)
(In init_arch_irq() desc->handle_irq() is assigned to default handlers: handle_simple_irq(), handle_level_irq(), etc.)
--> handle_IRQ_event() (handle the irqaction chains).

static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
desc->handle_irq(irq, desc);
#else
if (likely(desc->handle_irq))
desc->handle_irq(irq, desc);
else
__do_IRQ(irq);
#endif

Register an IRQ:
request_irq() -->
__setup_irq() -->
__irq_set_trigger() -->
chip->set_type(irq, flags & IRQF_TRIGGER_MASK) -->
bfin_gpio_irq_type() [mach-common/ints-priority.c]

static int
__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
struct irqaction *old, **p;
const char *old_name = NULL;
unsigned long flags;
int shared = 0;
int ret;

if (!desc)
return -EINVAL;

if (desc->chip == &no_irq_chip)
return -ENOSYS;
* Some drivers like serial.c use request_irq() heavily,
* so we have to be careful not to interfere with a
* running system.
if (new->flags & IRQF_SAMPLE_RANDOM) {
* This function might sleep, we want to call it first,
* outside of the atomic block.
* Yes, this might clear the entropy pool if the wrong
* driver is attempted to be loaded, without actually
* installing a new handler, but is this really a problem,
* only the sysadmin is able to do this.

* The following block of code has to be executed atomically
spin_lock_irqsave(&desc->lock, flags);
p = &desc->action;
old = *p;
if (old) {
* Can't share interrupts unless both agree to and are
* the same type (level, edge, polarity). So both flag
* fields must have IRQF_SHARED set and the bits which
* set the trigger type must match.
if (!((old->flags & new->flags) & IRQF_SHARED) ||
((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
old_name = old->name;
goto mismatch;

#if defined(CONFIG_IRQ_PER_CPU)
/* All handlers must agree on per-cpuness */
if ((old->flags & IRQF_PERCPU) !=
(new->flags & IRQF_PERCPU))
goto mismatch;

/* add new interrupt at end of irq queue */
do {
p = &old->next;
old = *p;
} while (old);
shared = 1;

if (!shared) {

/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc, irq, new->flags);

if (ret) {
spin_unlock_irqrestore(&desc->lock, flags);
return ret;
} else
#if defined(CONFIG_IRQ_PER_CPU)
if (new->flags & IRQF_PERCPU)
desc->status |= IRQ_PER_CPU;

desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |

if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
} else
/* Undo nested disables: */
desc->depth = 1;

/* Exclude IRQ from balancing if requested */
if (new->flags & IRQF_NOBALANCING)
desc->status |= IRQ_NO_BALANCING;

/* Set default affinity mask once everything is setup */
do_irq_select_affinity(irq, desc);

} else if ((new->flags & IRQF_TRIGGER_MASK)
&& (new->flags & IRQF_TRIGGER_MASK)
!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
/* hope the handler works with the actual trigger mode... */
pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
(int)(new->flags & IRQF_TRIGGER_MASK));

*p = new;

/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
desc->irqs_unhandled = 0;

* Check whether we disabled the irq via the spurious handler
* before. Reenable it and give it another chance.
if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
desc->status &= ~IRQ_SPURIOUS_DISABLED;
__enable_irq(desc, irq);

spin_unlock_irqrestore(&desc->lock, flags);

new->irq = irq;
register_irq_proc(irq, desc);
new->dir = NULL;
register_handler_proc(irq, new);

return 0;

if (!(new->flags & IRQF_PROBE_SHARED)) {
printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
if (old_name)
printk(KERN_ERR "current handler: %s\n", old_name);
spin_unlock_irqrestore(&desc->lock, flags);
return -EBUSY;

Blog Archive