Sunday, August 27, 2006

rt_task_set_period - resend


/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
/--------------------
irq: starts from 6

This function can be called from:

ipipe_grab_irq() - HW interrupt entry
ipipe_trigger_irq() - SW; a software-triggered IRQ is called with
regs == NULL, so there is no hardware to acknowledge.
---------------------/

struct ipipe_domain *this_domain;
struct list_head *head, *pos;
int m_ack, s_ack, s = -1;
ipipe_declare_cpuid;

m_ack = (regs == NULL); /* Software-triggered IRQs do not need
* any ack. */

/--------------------

A software-triggered IRQ has no regs.
See:
ipipe-core.c: ipipe_trigger_irq(): __ipipe_handle_irq(irq, NULL);
---------------------/

ipipe_load_cpuid();

this_domain = ipipe_percpu_domain[cpuid];
/-------------------------

struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
{[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };

What does "ipipe_percpu_domain[cpuid]" stands for?

Current domain on "this" CPU (meaningful for SMP system). cpuid always =
0 on Blackfin.
--------------------------/
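/--------------------
A minimal user-space sketch of the per-CPU "current domain" pointer and of
the GCC range designated initializer "[0 ... N-1] = ..." used above.  All
names here (my_domain, my_percpu_domain, MY_NR_CPUS) are invented for
illustration; only the idiom mirrors the kernel code.

#include <stdio.h>

#define MY_NR_CPUS 1

struct my_domain {
	const char *name;
};

static struct my_domain my_root = { .name = "root" };

/* GCC range designated initializer, same idiom as ipipe_percpu_domain[]:
 * every per-CPU slot starts out pointing at the root domain. */
static struct my_domain *my_percpu_domain[MY_NR_CPUS] =
	{ [0 ... MY_NR_CPUS - 1] = &my_root };

int main(void)
{
	int cpuid = 0;	/* always 0 on Blackfin (uniprocessor) */

	printf("current domain on CPU%d: %s\n",
	       cpuid, my_percpu_domain[cpuid]->name);
	return 0;
}
---------------------/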

s_ack = m_ack;

if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
head = &this_domain->p_link;
else
head = __ipipe_pipeline.next;

/----------------------------
__ipipe_pipeline: the global list of domains, kept in priority order.
head is either this_domain (the current domain, in which the interrupt
was taken, when the IRQ is marked sticky) or the highest-priority domain
in the pipeline.  A small sketch of this selection follows this note.
-----------------------------/
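/--------------------
A hypothetical sketch of how "head" is chosen, with two invented domains
("xenomai" above "root") and the sticky flag simplified to a per-domain
field (in the kernel it is per IRQ, in irqs[irq].control).

#include <stdio.h>

struct dom {
	const char *name;
	int sticky;	/* stands in for IPIPE_STICKY_FLAG (simplified) */
};

static struct dom rt   = { "xenomai", 0 };	/* head of the pipeline */
static struct dom root = { "root",    0 };	/* lowest priority */

static struct dom *pick_head(struct dom *this_domain)
{
	/* sticky IRQ: start at the current domain;
	 * otherwise: start at the highest-priority domain */
	return this_domain->sticky ? this_domain : &rt;
}

int main(void)
{
	printf("head = %s\n", pick_head(&root)->name);	/* -> xenomai */
	root.sticky = 1;
	printf("head = %s\n", pick_head(&root)->name);	/* -> root */
	return 0;
}
---------------------/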

/* Ack the interrupt. */

pos = head;

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/--------------------------------
Starting from "head"
---------------------------------/

/*
* For each domain handling the incoming IRQ, mark it as
* pending in its log.
*/
if (test_bit(IPIPE_HANDLE_FLAG,
&next_domain->irqs[irq].control)) {
/*
* Domains that handle this IRQ are polled for
* acknowledging it by decreasing priority order. The
* interrupt must be made pending _first_ in the
* domain's status flags before the PIC is unlocked.
*/

next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
__ipipe_set_irq_bit(next_domain, cpuid, irq);
/--------------------

struct ipcpudata {
unsigned long status;
unsigned long irq_pending_hi;
unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
struct ipirqcnt {
unsigned long pending_hits;
unsigned long total_hits;
} irq_counters[IPIPE_NR_IRQS];
} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];

#define __ipipe_set_irq_bit(ipd, cpuid, irq) \
do { \
	if (!test_bit(IPIPE_LOCK_FLAG, &(ipd)->irqs[irq].control)) { \
		__set_bit(irq & IPIPE_IRQ_IMASK, \
			  &(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
		__set_bit(irq >> IPIPE_IRQ_ISHIFT, \
			  &(ipd)->cpudata[cpuid].irq_pending_hi); \
	} \
} while(0)

So bit "a" (= irq / 32) of irq_pending_hi is set, together with bit
(irq % 32) of irq_pending_lo[a]; the IRQ number is recovered as
irq = a * 32 + (bit index within irq_pending_lo[a]).  A user-space model
of this encoding follows this note.

--------------------/
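/--------------------
A user-space model of this two-level pending log (names and sizes are
invented, assuming 32 IRQs per word as above): set_pending() mirrors what
__ipipe_set_irq_bit() does, and next_pending() recovers
irq = level * 32 + rank the same way __ipipe_sync_stage() does below.

#include <stdio.h>
#include <strings.h>		/* ffs() */

#define IRQ_ISHIFT 5
#define IRQ_IMASK  ((1 << IRQ_ISHIFT) - 1)	/* 0x1f */
#define IRQ_IWORDS 2				/* enough for 64 IRQs here */

static unsigned int pending_hi;
static unsigned int pending_lo[IRQ_IWORDS];

static void set_pending(unsigned irq)		/* ~ __ipipe_set_irq_bit() */
{
	pending_lo[irq >> IRQ_ISHIFT] |= 1U << (irq & IRQ_IMASK);
	pending_hi |= 1U << (irq >> IRQ_ISHIFT);
}

static int next_pending(void)			/* -1 when the log is empty */
{
	int level, rank;

	if (!pending_hi)
		return -1;
	level = ffs(pending_hi) - 1;
	rank = ffs(pending_lo[level]) - 1;
	pending_lo[level] &= ~(1U << rank);
	if (!pending_lo[level])
		pending_hi &= ~(1U << level);
	return (level << IRQ_ISHIFT) + rank;
}

int main(void)
{
	int irq;

	set_pending(6);
	set_pending(40);
	while ((irq = next_pending()) >= 0)
		printf("pending irq %d\n", irq);	/* prints 6, then 40 */
	return 0;
}
---------------------/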

ipipe_mark_irq_receipt(next_domain, irq, cpuid);

/---------
Only meaningful when CONFIG_IPIPE_STATS is enabled.
-------------/

/*
* Always get the first master acknowledge available.
* Once we've got it, allow slave acknowledge
* handlers to run (until one of them stops us).
*/
if (next_domain->irqs[irq].acknowledge != NULL) {
if (!m_ack)
m_ack = next_domain->irqs[irq].acknowledge(irq);
else if (test_bit
(IPIPE_SHARED_FLAG,
&next_domain->irqs[irq].control) && !s_ack)
s_ack = next_domain->irqs[irq].acknowledge(irq);
}
}

/-----------------------------------------------------------------------------------

Any usage for m_ack and s_ack?

They track whether the hardware acknowledge has already been performed
while the IRQ is propagated to every domain that handles it: the first
domain that provides an acknowledge routine does the master ack, and for
shared IRQs at most one extra slave ack is allowed to run.  A toy model
of this logic follows this note.

Call the HW ack routine here, mainly to mask the "irq" while handling it.

irqs[irq].acknowledge(irq) -> __ipipe_ack_irq() -> see below

ints-priority-sc.c:
static struct irqchip bf533_core_irqchip = {
#ifdef CONFIG_IPIPE
.ack = bf533_core_mask_irq,
#else /* !CONFIG_IPIPE */
.ack = ack_noop,
#endif /* CONFIG_IPIPE */
.mask = bf533_core_mask_irq,
.unmask = bf533_core_unmask_irq,
};

static void bf533_internal_mask_irq(unsigned int irq)
{
*pSIC_IMASK &= ~(1 << (irq - (IRQ_CORETMR + 1)));
__builtin_bfin_ssync();
}

--------------------------------------------------------------------/
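/--------------------
A toy, user-space model of the m_ack/s_ack bookkeeping, assuming two
invented domains sharing one IRQ: the first domain providing an
acknowledge routine performs the master ack; for a shared IRQ at most one
slave ack runs afterwards.

#include <stdio.h>

struct toy_domain {
	const char *name;
	int handles, shared;
	int (*acknowledge)(unsigned irq);
};

static int ack_hw(unsigned irq)
{
	printf("hw ack of irq %u\n", irq);	/* e.g. mask it in the PIC */
	return 1;
}

static struct toy_domain toy_pipeline[] = {	/* priority order, highest first */
	{ "xenomai", 1, 1, ack_hw },
	{ "root",    1, 1, ack_hw },
};

int main(void)
{
	unsigned irq = 6;
	int m_ack = 0, s_ack = 0, i;	/* m_ack would start at 1 for a SW-triggered IRQ */

	for (i = 0; i < 2; i++) {
		struct toy_domain *d = &toy_pipeline[i];

		if (!d->handles || !d->acknowledge)
			continue;
		if (!m_ack)
			m_ack = d->acknowledge(irq);	/* first (master) ack */
		else if (d->shared && !s_ack)
			s_ack = d->acknowledge(irq);	/* at most one slave ack */
	}
	return 0;
}
---------------------/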

/*
* If the domain does not want the IRQ to be passed down the
* interrupt pipe, exit the loop now.
*/

if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
break;

pos = next_domain->p_link.next;
}

/*
* Now walk the pipeline, yielding control to the highest
* priority domain that has pending interrupt(s) or
* immediately to the current domain if the interrupt has been
* marked as 'sticky'. This search does not go beyond the
* current domain in the pipeline. We also enforce the
* additional root stage lock (blackfin-specific). */

if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
s = __test_and_set_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);

/----------------------------------------
test_and_set_bit() returns the old value of the bit.
If IPIPE_ROOTLOCK_FLAG is set, stall the root domain.
See __ipipe_sync_stage() below.
-----------------------------------------/

__ipipe_walk_pipeline(head, cpuid);

if (!s)
__clear_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);
}
/---------------------------
Clear the stall bit only if we set it ourselves (s == 0); if the root
lock was not taken (s == -1) or the root domain was already stalled
(s == 1), leave the status as it was.  A small model of this
save/restore pattern follows this note.
----------------------------/
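/--------------------
A small user-space model of the save/restore pattern above (names
invented): because __test_and_set_bit() returns the previous value, the
stall bit is cleared afterwards only if we were the ones who set it.

#include <stdio.h>

#define STALL_BIT 0

static unsigned long status;

static int test_and_set(int bit, unsigned long *word)	/* returns old value */
{
	int old = (*word >> bit) & 1;

	*word |= 1UL << bit;
	return old;
}

static void handle(int rootlock)
{
	int s = -1;

	if (rootlock)
		s = test_and_set(STALL_BIT, &status);

	/* ... __ipipe_walk_pipeline() would run here ... */

	if (!s)		/* only undo the stall we introduced ourselves */
		status &= ~(1UL << STALL_BIT);

	printf("rootlock=%d -> stall bit now %lu\n",
	       rootlock, (status >> STALL_BIT) & 1);
}

int main(void)
{
	handle(0);			/* s == -1: bit untouched, stays 0 */
	handle(1);			/* s ==  0: set, then cleared again */
	status |= 1UL << STALL_BIT;	/* pretend root was already stalled */
	handle(1);			/* s ==  1: left set, as it was */
	return 0;
}
---------------------/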

==============================================================================
kernel/ipipe-root.c

int __ipipe_ack_irq(unsigned irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
ipipe_declare_cpuid;

if (irq == IRQ_SYSTMR) {
/* Clear interrupt latch for TIMER0, don't mask. */
*pTIMER_STATUS = 1;
__builtin_bfin_ssync();
return 1;
}

/*
* No need to mask IRQs at hw level: we are always called from
* __ipipe_handle_irq(), so interrupts are already off. We
* stall the pipeline so that spin_lock_irq*() ops won't
* unintentionally flush it, since this could cause infinite
* recursion.
*/

ipipe_load_cpuid();
flags = ipipe_test_and_stall_pipeline();
preempt_disable();
desc->chip->ack(irq);
preempt_enable_no_resched();
ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags,
cpuid);

return 1;
}

================================================================================

ipipe/core.c:

/* __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
be called with local hw interrupts disabled. */

void fastcall __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
{
struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/-----------------------------------------------
Note: this_domain and next_domain may differ. According to
__ipipe_handle_irq(), the walk starts at "head", which is either the
current domain (sticky IRQ) or the highest-priority domain.
-----------------------------------------------/

if (test_bit
(IPIPE_STALL_FLAG, &next_domain->cpudata[cpuid].status))
break; /* Stalled stage -- do not go further. */

/-------------------------------------
If next_domain is stalled, stop here: all domains below it in the
pipeline are ignored for now.  A simplified model of the walk follows
the function below.
-------------------------------------/

if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {

if (next_domain == this_domain)
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
else {
__ipipe_switch_to(this_domain, next_domain,
cpuid);

ipipe_load_cpuid(); /* Processor might have changed. */

if (this_domain->cpudata[cpuid].
irq_pending_hi != 0
&& !test_bit(IPIPE_STALL_FLAG,
&this_domain->cpudata[cpuid].status))
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
}

break;
} else if (next_domain == this_domain)
break;

pos = next_domain->p_link.next;
}
}
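/--------------------
A simplified, user-space model of the walk (all names invented): starting
at "head", stop at the first stalled stage; the first unstalled stage
with pending IRQs gets to run, in place if it is the current domain or
after a domain switch otherwise; never look past the current domain.

#include <stdio.h>

struct stage {
	const char *name;
	int stalled, pending;
};

static void walk(struct stage *p, int n, int head, int cur)
{
	int i;

	for (i = head; i < n; i++) {
		struct stage *s = &p[i];

		if (s->stalled) {
			printf("%s is stalled, stop\n", s->name);
			return;
		}
		if (s->pending) {
			printf("%s: play its pending log %s\n", s->name,
			       i == cur ? "in place" : "after switching in");
			return;
		}
		if (i == cur)	/* never look past the current domain */
			return;
	}
}

int main(void)
{
	struct stage p[] = {		/* index 0 = highest priority */
		{ "xenomai", 0, 1 },
		{ "root",    0, 0 },
	};

	walk(p, 2, 0, 1);	/* head = xenomai, current = root */
	return 0;
}
---------------------/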

======================================================================
include/linux/ipipe.h

/* Called with hw interrupts off. */
static inline void __ipipe_switch_to(struct ipipe_domain *out,
struct ipipe_domain *in, int cpuid)
{
void ipipe_suspend_domain(void);

/*
* "in" is guaranteed to be closer than "out" from the head of the
* pipeline (and obviously different).
*/

ipipe_percpu_domain[cpuid] = in;

/------------------------------------------------
Current domain has changed.
-------------------------------------------------/

ipipe_suspend_domain(); /* Sync stage and propagate interrupts.
*/
/-------------------------------------------------
When we exit from ipipe_suspend_domain(), the current domain is still "in" (?)
--------------------------------------------------/

ipipe_load_cpuid(); /* Processor might have changed. */

if (ipipe_percpu_domain[cpuid] == in)
/*
* Otherwise, something has changed the current domain under
* our feet recycling the register set; do not override.
*/
ipipe_percpu_domain[cpuid] = out;

/--------------------------------------------
When we exit from __ipipe_switch_to(), the current domain is "out"?
---------------------------------------------/

}

==========================================================================
kernel/ipipe/core.c

/*
* ipipe_suspend_domain() -- Suspend the current domain, switching to
* the next one which has pending work down the pipeline.
*/
void ipipe_suspend_domain(void)
{
struct ipipe_domain *this_domain, *next_domain;
struct list_head *ln;
unsigned long flags;
ipipe_declare_cpuid;

ipipe_lock_cpu(flags);

this_domain = next_domain = ipipe_percpu_domain[cpuid];

__clear_bit(IPIPE_STALL_FLAG, &this_domain->cpudata[cpuid].status);

ipipe_mark_domain_unstall(this_domain, cpuid);

if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
goto sync_stage;

for (;;) {
ln = next_domain->p_link.next;

if (ln == &__ipipe_pipeline)
break;

next_domain = list_entry(ln, struct ipipe_domain, p_link);

if (test_bit(IPIPE_STALL_FLAG,
&next_domain->cpudata[cpuid].status))
break;

if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
continue;

ipipe_percpu_domain[cpuid] = next_domain;
/-----------------------------------------------------------------------
Change current domain
-------------------------------------------------------------------------/

sync_stage:

__ipipe_sync_stage(IPIPE_IRQMASK_ANY);

ipipe_load_cpuid(); /* Processor might have changed. */

if (ipipe_percpu_domain[cpuid] != next_domain)
/*
* Something has changed the current domain under our
* feet, recycling the register set; take note.
*/
this_domain = ipipe_percpu_domain[cpuid];
}

ipipe_percpu_domain[cpuid] = this_domain;

/----------------------------------------------------
Restore current domain
------------------------------------------------------/

ipipe_unlock_cpu(flags);
}

===========================================================================

kernel/ipipe-core.c

/*
* __ipipe_sync_stage() -- Flush the pending IRQs for the current
* domain (and processor). This routine flushes the interrupt log
* (see "Optimistic interrupt protection" from D. Stodolsky et al. for
* more on the deferred interrupt scheme). Every interrupt that
* occurred while the pipeline was stalled gets played. WARNING:
* callers on SMP boxen should always check for CPU migration on
* return of this routine. One can control the kind of interrupts
* which are going to be sync'ed using the syncmask
* parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
* plays virtual interrupts only. This routine must be called with hw
* interrupts off.
*/
void __ipipe_sync_stage(unsigned long syncmask)
{
unsigned long mask, submask;
struct ipcpudata *cpudata;
struct ipipe_domain *ipd;
ipipe_declare_cpuid;
int level, rank;
unsigned irq;

ipipe_load_cpuid();
ipd = ipipe_percpu_domain[cpuid];
cpudata = &ipd->cpudata[cpuid];

if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
return;
/------------
Has __ipipe_sync_stage() already been entered on this stage?  If so,
return (IPIPE_SYNC_FLAG acts as a re-entry guard).
-------------/

/*
* The policy here is to keep the dispatching code interrupt-free
* by stalling the current stage. If the upper domain handler
* (which we call) wants to re-enable interrupts while in a safe
* portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
* sigaction()), it will have to unstall (then stall again before
* returning to us!) the stage when it sees fit.
*/
while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
level = ffs(mask) - 1;
__clear_bit(level, &cpudata->irq_pending_hi);

while ((submask = cpudata->irq_pending_lo[level]) != 0) {

if (ipd == ipipe_root_domain &&
test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags)) {
__set_bit(level, &cpudata->irq_pending_hi);
goto done;

/---------------------------------------------------------
If IPIPE_ROOTLOCK_FLAG is set, do not run the interrupt handlers here;
leave the level marked pending and bail out.
----------------------------------------------------------/

}

rank = ffs(submask) - 1;
irq = (level << IPIPE_IRQ_ISHIFT) + rank;

/------------------------------------
Get the "irq" num
-------------------------------------/

if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
continue;

/------------------------------------------
If LOCK_FLAG is set, do not handle this "irq"
-------------------------------------------/
}

if (--cpudata->irq_counters[irq].pending_hits == 0) {
/---------------------------------------------
Every logged hit of this "irq" has now been accounted for, so clear its
pending bit.  A small model of this counting follows the function below.
----------------------------------------------/
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
ipipe_mark_irq_delivery(ipd,irq,cpuid);
}

__set_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_stall(ipd, cpuid);

if (ipd == ipipe_root_domain) {
/*
* Note: the I-pipe implements a
* threaded interrupt model on this
* arch for Linux external IRQs. The
* interrupt handler we call here only
* wakes up the associated IRQ thread.
*/
if (ipipe_virtual_irq_p(irq)) {

/----------------
Is this a virtual irq?

-----------------/
/* No irqtail here; virtual interrupts have
no effect on IPEND so there is no need for
processing deferral. */
local_irq_enable_hw();
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
local_irq_disable_hw();
} else
/* No need to run the irqtail here either; we are not
preemptable by hw IRQs, so non-Linux IRQs cannot
stack over the short thread wakeup code. Which in turn
means that no irqtail condition could be pending
for domains above Linux in the pipeline. */
((void (*)(unsigned, struct pt_regs *))
ipd->irqs[irq].handler) (irq, __ipipe_tick_regs + cpuid);

/-----------------------------------------------------
Run the Linux interrupt handler (which, as the comment above notes,
only wakes up the associated IRQ thread), passing the saved tick regs.
--------------------------------------------------------/

} else {
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
/* Attempt to exit the outer interrupt
* level before starting the deferred
* IRQ processing. */
__ipipe_run_irqtail();
/-----------------
see below
-----------------/

__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}
#ifdef CONFIG_SMP
{
int _cpuid = ipipe_processor_id();

if (_cpuid != cpuid) { /* Handle CPU migration. */
/* We expect any domain to clear the SYNC bit each
time it switches in a new task, so that preemptions
and/or CPU migrations (in the SMP case) over the
ISR do not lock out the log syncer for some
indefinite amount of time. In the Linux case,
schedule() handles this (see kernel/sched.c). For
this reason, we don't bother clearing it here for
the source CPU in the migration handling case,
since it must have scheduled another task in by
now. */
cpuid = _cpuid;
cpudata = &ipd->cpudata[cpuid];
__set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
}
}
#endif /* CONFIG_SMP */
__clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_unstall(ipd, cpuid);
}
}

done:
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}
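/--------------------
A user-space model of the pending_hits bookkeeping in the drain loop
above (names invented, single-level log for brevity): each receipt bumps
pending_hits, and the pending bit is cleared only when that many
deliveries have been replayed, so an IRQ logged twice while the stage was
stalled is handled twice.

#include <stdio.h>
#include <strings.h>		/* ffs() */

#define NIRQ 32

static unsigned int pending;		/* single-level log, for brevity */
static unsigned int pending_hits[NIRQ];

static void log_irq(unsigned irq)	/* what the receipt path does */
{
	pending_hits[irq]++;
	pending |= 1U << irq;
}

static void sync_stage(void)		/* what the drain loop does */
{
	while (pending) {
		unsigned irq = ffs(pending) - 1;

		if (--pending_hits[irq] == 0)
			pending &= ~(1U << irq);	/* last logged hit */

		printf("handling irq %u\n", irq);
	}
}

int main(void)
{
	log_irq(6);
	log_irq(6);		/* same IRQ hits twice while "stalled" */
	log_irq(9);
	sync_stage();		/* irq 6 is handled twice, irq 9 once */
	return 0;
}
---------------------/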

==============================================================
kernel/ipipe-core.c

static inline void __ipipe_run_irqtail(void)
{
asmlinkage void __ipipe_call_irqtail(void);
unsigned long pending;

__builtin_bfin_csync();

pending = *pIPEND;
if (pending & 0x8000) {

/----------------------------
IPEND bit 15 (IVG15) is set: is an interrupt pending there, or are we
simply running in kernel (supervisor) mode?
-----------------------------/

pending &= ~0x8010;

/------------------------------
mask out IVG15 (bit 15) and bit 4 (the global interrupt-disable bit)
---------------------------/

if (pending && (pending & (pending - 1)) == 0)
__ipipe_call_irqtail();
/--------------
See ___ipipe_call_irqtail below.

"pending && (pending & (pending - 1)) == 0" is true when exactly one bit
remains set, i.e. ours is the only active interrupt: no nested
interrupts.  A small demonstration of this bit trick follows the
function.
---------------/
}
}
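/--------------------
A small demonstration of the single-bit test used above: x & (x - 1)
clears the lowest set bit, so the expression is zero only when no other
bit was set.

#include <stdio.h>

static int exactly_one_bit(unsigned int x)
{
	return x && (x & (x - 1)) == 0;
}

int main(void)
{
	printf("%d\n", exactly_one_bit(0x0040));	/* 1: only IVG6 active */
	printf("%d\n", exactly_one_bit(0x0060));	/* 0: IVG5 and IVG6 (nested) */
	printf("%d\n", exactly_one_bit(0x0000));	/* 0: nothing active */
	return 0;
}
---------------------/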

===============================================
ENTRY(___ipipe_call_irqtail)

r0.l = 1f;
r0.h = 1f;
reti = r0;
rti
/-------

return from interrupt and continue here
---------/
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
p0.l = ___ipipe_irq_tail_hook;
p0.h = ___ipipe_irq_tail_hook;
p0 = [p0] ;
sp += -12 ;
call (p0) ;
sp += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];

/------------
Call __ipipe_irq_tail_hook; Xenomai installs it in xnarch_init():
asm-blackfin/system.h:

static inline int xnarch_init (void)

{
int err;

__ipipe_irq_tail_hook = (unsigned long)&xnpod_schedule_deferred;

...
}

------------/

[--sp] = reti;
reti = [sp++]; /* IRQs are off. */

/-----------
Push RETI and pop it straight back: the value is unchanged, but the
write disables interrupt nesting.

Instructions that access the RETI register have an implicit side
effect: reading RETI enables interrupt nesting, writing to it disables
nesting again.
------------/

r0.h = 3f;
r0.l = 3f;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync;
r0 = 0x401f;
sti r0;
/----------------------------------
Enable interrupts with only IVG14 unmasked (0x401f). Why is the
operation below needed?
----------------------------------/

raise 14;

[--sp] = reti; /* IRQs on. */
2:
jump 2b /* Likely paranoid. */
3:
sp += 4; /* Discard saved RETI */
r0.h = _evt14_softirq;
r0.l = _evt14_softirq;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync

/* Restore EVT14 */
p0.l = _irq_flags;
p0.h = _irq_flags;
r0 = [p0];
sti r0
rts;

==========================================================
#ifdef __KERNEL__

void xnpod_schedule_deferred (void)

{
if (nkpod && xnsched_resched_p())
xnpod_schedule();
}

#endif /* __KERNEL__ */

===========================================================
