Wednesday, August 30, 2006

gcc macro - resend

Very good reference:

http://gcc.gnu.org/onlinedocs/gcc-3.1.1/cpp/Macros.html#Macros


#define __IRQ_SYSTMR(_X) IRQ_TMR ## _X
#define _IRQ_SYSTMR(_X) __IRQ_SYSTMR(_X)
#define IRQ_SYSTMR _IRQ_SYSTMR(CONFIG_IPIPE_SYS_TIMER)

#ifdef CONFIG_BF533
#define __IRQ_PRIOTMR(_X) CONFIG_TIMER ## _X
#else
#define __IRQ_PRIOTMR(_X) CONFIG_IRQ_TMR ## _X
#endif

#define _IRQ_PRIOTMR(_X) __IRQ_PRIOTMR(_X)
#define IRQ_PRIOTMR _IRQ_PRIOTMR(CONFIG_IPIPE_SYS_TIMER)

#define _TIMER_CONFIG(_XX) *pTIMER ## _XX ## _CONFIG
#define _TIMER_PERIOD(_XX) *pTIMER ## _XX ## _PERIOD
#define _TIMER_WIDTH(_XX) *pTIMER ## _XX ## _WIDTH
#define _TIMER_COUNTER(_XX) *pTIMER ## _XX ## _COUNTER
#define GP_TIMER_CONFIG(_XX) _TIMER_CONFIG(_XX)
#define GP_TIMER_PERIOD(_XX) _TIMER_PERIOD(_XX)
#define GP_TIMER_WIDTH(_XX) _TIMER_WIDTH(_XX)
#define GP_TIMER_COUNTER(_XX) _TIMER_COUNTER(_XX)
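
Why the extra _IRQ_SYSTMR/_IRQ_PRIOTMR level? Token pasting (##) happens
before macro arguments are expanded, so a single-level paste would glue
the literal parameter text instead of its value. A minimal standalone
sketch (the CFG and IRQ_TMR0 names here are invented, not from the
kernel):

#include <stdio.h>

#define CFG 0                          /* stand-in for CONFIG_IPIPE_SYS_TIMER */
#define IRQ_TMR0 40                    /* hypothetical interrupt number */

#define PASTE_DIRECT(_X) IRQ_TMR ## _X
#define PASTE_EXPAND(_X) PASTE_DIRECT(_X)

int main(void)
{
	/* PASTE_DIRECT(CFG) would paste the literal name IRQ_TMRCFG,
	 * which is undefined. The extra level expands CFG to 0 first. */
	printf("%d\n", PASTE_EXPAND(CFG)); /* pastes IRQ_TMR0 -> prints 40 */
	return 0;
}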

Sunday, August 27, 2006

rt_task_set_period - resend


/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
/--------------------
irq: starts from 6

This function can be called from:

ipipe_grab_irq() - HW
ipipe_trigger_irq() - SW, SW triggered IRQ regs == NULL, so do not need
to acknowledge the HW.
---------------------/

struct ipipe_domain *this_domain;
struct list_head *head, *pos;
int m_ack, s_ack, s = -1;
ipipe_declare_cpuid;

m_ack = (regs == NULL); /* Software-triggered IRQs do not need
* any ack. */

/--------------------

A software-triggered IRQ has no regs.
See:
ipipe-core.c: ipipe_trigger_irq(): __ipipe_handle_irq(irq, NULL);
---------------------/

ipipe_load_cpuid();

this_domain = ipipe_percpu_domain[cpuid];
/-------------------------

struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
{[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };

What does "ipipe_percpu_domain[cpuid]" stand for?

It is the current domain on "this" CPU (meaningful on SMP systems).
cpuid is always 0 on Blackfin.
--------------------------/

s_ack = m_ack;

if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
head = &this_domain->p_link;
else
head = __ipipe_pipeline.next;

/----------------------------
__ipipe_pipeline: the global list of domains, in priority order.
head starts either from this_domain (the current domain, in which the
interrupt happened) or from the highest-priority domain.
-----------------------------/

/* Ack the interrupt. */

pos = head;

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/--------------------------------
Starting from "head"
---------------------------------/

/*
* For each domain handling the incoming IRQ, mark it as
* pending in its log.
*/
if (test_bit(IPIPE_HANDLE_FLAG,
&next_domain->irqs[irq].control)) {
/*
* Domains that handle this IRQ are polled for
* acknowledging it by decreasing priority order. The
* interrupt must be made pending _first_ in the
* domain's status flags before the PIC is unlocked.
*/

next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
__ipipe_set_irq_bit(next_domain, cpuid, irq);
/--------------------

struct ipcpudata {
unsigned long status;
unsigned long irq_pending_hi;
unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
struct ipirqcnt {
unsigned long pending_hits;
unsigned long total_hits;
} irq_counters[IPIPE_NR_IRQS];
} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];

#define __ipipe_set_irq_bit(ipd,cpuid,irq) \
	do { \
		if (!test_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) { \
			__set_bit(irq & IPIPE_IRQ_IMASK, \
				  &(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
			__set_bit(irq >> IPIPE_IRQ_ISHIFT, \
				  &(ipd)->cpudata[cpuid].irq_pending_hi); \
		} \
	} while(0)

(Bit "a" of irq_pending_hi is set when word "a" of irq_pending_lo has
any bit set, so irq = a * 32 + (bit index set within irq_pending_lo[a]).)

--------------------/
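
A standalone sketch of this two-level pending bitmap, assuming 32 IRQs
per word (IPIPE_IRQ_ISHIFT == 5); the helper names are mine, not the
kernel's:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define ISHIFT 5
#define IMASK  ((1 << ISHIFT) - 1)

static unsigned long pending_hi;
static unsigned long pending_lo[8];

/* Same shape as __ipipe_set_irq_bit(): set the irq's bit in its word,
 * and mark the word itself in the first-level bitmap. */
static void mark_pending(unsigned irq)
{
	pending_lo[irq >> ISHIFT] |= 1UL << (irq & IMASK);
	pending_hi |= 1UL << (irq >> ISHIFT);
}

/* Same decode as __ipipe_sync_stage(): level from the hi word, rank
 * from the lo word, irq = (level << ISHIFT) + rank. */
static int next_pending(void)
{
	int level, rank;

	if (!pending_hi)
		return -1;
	level = ffs((int)pending_hi) - 1;
	rank = ffs((int)pending_lo[level]) - 1;
	return (level << ISHIFT) + rank;
}

int main(void)
{
	mark_pending(38);		/* level 1, rank 6 */
	printf("%d\n", next_pending());	/* prints 38 */
	return 0;
}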

ipipe_mark_irq_receipt(next_domain, irq, cpuid);

/---------
meaningful only when CONFIG_IPIPE_STATS is enabled
-------------/

/*
* Always get the first master acknowledge available.
* Once we've got it, allow slave acknowledge
* handlers to run (until one of them stops us).
*/
if (next_domain->irqs[irq].acknowledge != NULL) {
if (!m_ack)
m_ack = next_domain->irqs[irq].acknowledge(irq);
else if (test_bit
(IPIPE_SHARED_FLAG,
&next_domain->irqs[irq].control) && !s_ack)
s_ack = next_domain->irqs[irq].acknowledge(irq);
}
}

/-----------------------------------------------------------------------------------

Any usage for m_ack and s_ack?

Call the HW ack routine here. Mainly to mask the "irq" while handling
it.

irqs[irq].acknowledge(irq) -> __ipipe_ack_irq() --> see below

ints-priority-sc.c
static struct irqchip bf533_core_irqchip = {
#ifdef CONFIG_IPIPE
.ack = bf533_core_mask_irq,
#else /* !CONFIG_IPIPE */
.ack = ack_noop,
#endif /* CONFIG_IPIPE */
.mask = bf533_core_mask_irq,
.unmask = bf533_core_unmask_irq,
};


static void bf533_internal_mask_irq(unsigned int irq)
{
*pSIC_IMASK &= ~(1 << (irq - (IRQ_CORETMR + 1)));
__builtin_bfin_ssync();
}

--------------------------------------------------------------------/

/*
* If the domain does not want the IRQ to be passed down the
* interrupt pipe, exit the loop now.
*/

if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
break;

pos = next_domain->p_link.next;
}

/*
* Now walk the pipeline, yielding control to the highest
* priority domain that has pending interrupt(s) or
* immediately to the current domain if the interrupt has been
* marked as 'sticky'. This search does not go beyond the
* current domain in the pipeline. We also enforce the
* additional root stage lock (blackfin-specific). */

if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
s = __test_and_set_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);

/----------------------------------------
test_and_set_bit() returns the old value of the bit.
If IPIPE_ROOTLOCK_FLAG is set, stall the root domain.
See __ipipe_sync_stage() below.
-----------------------------------------/

__ipipe_walk_pipeline(head, cpuid);

if (!s)
__clear_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);
}
/---------------------------
Restore the original status: clear the stall bit only if it was not
set before.
----------------------------/
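
The s / IPIPE_ROOTLOCK_FLAG dance above is the usual test-and-set
save/restore pattern; a tiny standalone sketch (names are mine, and the
s == -1 "lock not taken" case is omitted):

#include <stdio.h>

static unsigned long status;	/* bit 0 plays the STALL flag */

/* Like __test_and_set_bit(): set bit 0, return its previous value. */
static int test_and_set_stall(void)
{
	int old = (int)(status & 1);

	status |= 1;
	return old;
}

int main(void)
{
	int s = test_and_set_stall();

	/* ... work done while the stage is stalled ... */
	if (!s)
		status &= ~1UL;		/* only undo the stall if we set it */
	printf("%lu\n", status);	/* prints 0: original state restored */
	return 0;
}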

==============================================================================
kernel/ipipe-root.c

int __ipipe_ack_irq(unsigned irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
ipipe_declare_cpuid;

if (irq == IRQ_SYSTMR) {
/* Clear interrupt latch for TIMER0, don't mask. */
*pTIMER_STATUS = 1;
__builtin_bfin_ssync();
return 1;
}

/*
* No need to mask IRQs at hw level: we are always called from
* __ipipe_handle_irq(), so interrupts are already off. We
* stall the pipeline so that spin_lock_irq*() ops won't
* unintentionally flush it, since this could cause infinite
* recursion.
*/

ipipe_load_cpuid();
flags = ipipe_test_and_stall_pipeline();
preempt_disable();
desc->chip->ack(irq);
preempt_enable_no_resched();
ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags,
cpuid);

return 1;
}

================================================================================

ipipe/core.c:

/* __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
be called with local hw interrupts disabled. */

void fastcall __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
{
struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/-----------------------------------------------
Note: this_domain and next_domain may differ. According to
__ipipe_handle_irq(), the walk starts either at the head of the
pipeline or at the current domain.
-----------------------------------------------/

if (test_bit
(IPIPE_STALL_FLAG, &next_domain->cpudata[cpuid].status))
break; /* Stalled stage -- do not go further. */

/-------------------------------------
Domains with lower priority than next_domain are all ignored.
-------------------------------------/

if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {

if (next_domain == this_domain)
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
else {
__ipipe_switch_to(this_domain, next_domain,
cpuid);

ipipe_load_cpuid(); /* Processor might have changed. */

if (this_domain->cpudata[cpuid].
irq_pending_hi != 0
&& !test_bit(IPIPE_STALL_FLAG,
&this_domain->cpudata[cpuid].status))
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
}

break;
} else if (next_domain == this_domain)
break;

pos = next_domain->p_link.next;
}
}
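
A much-simplified model of this walk: domains in priority order, stop
at the first stalled stage, sync the first stage with pending IRQs,
and never look past the current domain. Everything here (names, table)
is illustrative, not the kernel's:

#include <stdio.h>

struct domain {
	const char *name;
	int stalled;
	int pending;
};

/* Return the name of the stage whose log should be synced, or NULL.
 * Index 0 is the highest-priority domain. */
static const char *walk(struct domain *d, int current)
{
	int i;

	for (i = 0; i <= current; i++) {
		if (d[i].stalled)
			return NULL;		/* stalled stage: stop here */
		if (d[i].pending)
			return d[i].name;	/* play this stage's log */
	}
	return NULL;
}

int main(void)
{
	struct domain d[] = { { "xenomai", 0, 0 }, { "root", 0, 1 } };

	printf("%s\n", walk(d, 1));	/* prints: root */
	return 0;
}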

======================================================================
include/linux/ipipe.h

/* Called with hw interrupts off. */
static inline void __ipipe_switch_to(struct ipipe_domain *out,
struct ipipe_domain *in, int cpuid)
{
void ipipe_suspend_domain(void);

/*
* "in" is guaranteed to be closer than "out" from the head of the
* pipeline (and obviously different).
*/

ipipe_percpu_domain[cpuid] = in;

/------------------------------------------------
Current domain has changed.
-------------------------------------------------/

ipipe_suspend_domain(); /* Sync stage and propagate interrupts.
*/
/-------------------------------------------------
When we exit from ipipe_suspend_domain(), the current domain is still
"in" (?)
--------------------------------------------------/

ipipe_load_cpuid(); /* Processor might have changed. */

if (ipipe_percpu_domain[cpuid] == in)
/*
* Otherwise, something has changed the current domain under
* our feet recycling the register set; do not override.
*/
ipipe_percpu_domain[cpuid] = out;

/--------------------------------------------
When we exit from __ipipe_switch_to(), the current domain is "out"?
---------------------------------------------/

}

==========================================================================
kernel/ipipe/core.c

/*
* ipipe_suspend_domain() -- Suspend the current domain, switching to
* the next one which has pending work down the pipeline.
*/
void ipipe_suspend_domain(void)
{
struct ipipe_domain *this_domain, *next_domain;
struct list_head *ln;
unsigned long flags;
ipipe_declare_cpuid;

ipipe_lock_cpu(flags);

this_domain = next_domain = ipipe_percpu_domain[cpuid];

__clear_bit(IPIPE_STALL_FLAG, &this_domain->cpudata[cpuid].status);

ipipe_mark_domain_unstall(this_domain, cpuid);

if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
goto sync_stage;

for (;;) {
ln = next_domain->p_link.next;

if (ln == &__ipipe_pipeline)
break;

next_domain = list_entry(ln, struct ipipe_domain, p_link);

if (test_bit(IPIPE_STALL_FLAG,
&next_domain->cpudata[cpuid].status))
break;

if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
continue;

ipipe_percpu_domain[cpuid] = next_domain;
/-----------------------------------------------------------------------
Change current domain
-------------------------------------------------------------------------/

sync_stage:

__ipipe_sync_stage(IPIPE_IRQMASK_ANY);

ipipe_load_cpuid(); /* Processor might have changed. */

if (ipipe_percpu_domain[cpuid] != next_domain)
/*
* Something has changed the current domain under our
* feet, recycling the register set; take note.
*/
this_domain = ipipe_percpu_domain[cpuid];
}

ipipe_percpu_domain[cpuid] = this_domain;

/----------------------------------------------------
Restore current domain
------------------------------------------------------/

ipipe_unlock_cpu(flags);
}

===========================================================================

kernel/ipipe-core.c

/*
* __ipipe_sync_stage() -- Flush the pending IRQs for the current
* domain (and processor). This routine flushes the interrupt log
* (see "Optimistic interrupt protection" from D. Stodolsky et al. for
* more on the deferred interrupt scheme). Every interrupt that
* occurred while the pipeline was stalled gets played. WARNING:
* callers on SMP boxen should always check for CPU migration on
* return of this routine. One can control the kind of interrupts
* which are going to be sync'ed using the syncmask
* parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
* plays virtual interrupts only. This routine must be called with hw
* interrupts off.
*/
void __ipipe_sync_stage(unsigned long syncmask)
{
unsigned long mask, submask;
struct ipcpudata *cpudata;
struct ipipe_domain *ipd;
ipipe_declare_cpuid;
int level, rank;
unsigned irq;

ipipe_load_cpuid();
ipd = ipipe_percpu_domain[cpuid];
cpudata = &ipd->cpudata[cpuid];

if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
return;
/------------
Has __ipipe_sync_stage() already been entered? If so, return.
-------------/

/*
* The policy here is to keep the dispatching code interrupt-free
* by stalling the current stage. If the upper domain handler
* (which we call) wants to re-enable interrupts while in a safe
* portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
* sigaction()), it will have to unstall (then stall again before
* returning to us!) the stage when it sees fit.
*/
while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
level = ffs(mask) - 1;
__clear_bit(level, &cpudata->irq_pending_hi);

while ((submask = cpudata->irq_pending_lo[level]) != 0) {

if (ipd == ipipe_root_domain &&
test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags)) {
__set_bit(level, &cpudata->irq_pending_hi);
goto done;

/---------------------------------------------------------
If ROOTLOCK_FLAG is set, do not run interrupt handler here
----------------------------------------------------------/

}

rank = ffs(submask) - 1;
irq = (level << IPIPE_IRQ_ISHIFT) + rank;

/------------------------------------
Get the "irq" num
-------------------------------------/

if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
continue;

/------------------------------------------
If LOCK_FLAG is set, do not handle this "irq"
-------------------------------------------/
}

if (--cpudata->irq_counters[irq].pending_hits == 0) {
/---------------------------------------------
Every pending instance of this "irq" is about to be handled.
----------------------------------------------/
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
ipipe_mark_irq_delivery(ipd,irq,cpuid);
}

__set_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_stall(ipd, cpuid);

if (ipd == ipipe_root_domain) {
/*
* Note: the I-pipe implements a
* threaded interrupt model on this
* arch for Linux external IRQs. The
* interrupt handler we call here only
* wakes up the associated IRQ thread.
*/
if (ipipe_virtual_irq_p(irq)) {

/----------------
Is this a virtual irq?

-----------------/
/* No irqtail here; virtual interrupts have
no effect on IPEND so there is no need for
processing deferral. */
local_irq_enable_hw();
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
local_irq_disable_hw();
} else
/* No need to run the irqtail here either; we are not
preemptable by hw IRQs, so non-Linux IRQs cannot
stack over the short thread wakeup code. Which in turn
means that no irqtail condition could be pending
for domains above Linux in the pipeline. */
((void (*)(unsigned, struct pt_regs *))
ipd->irqs[irq].handler) (irq, __ipipe_tick_regs + cpuid);

/-----------------------------------------------------
Run HW interrupt handler
--------------------------------------------------------/

} else {
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
/* Attempt to exit the outer interrupt
* level before starting the deferred
* IRQ processing. */
__ipipe_run_irqtail();
/-----------------
see below
-----------------/

__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}
#ifdef CONFIG_SMP
{
int _cpuid = ipipe_processor_id();

if (_cpuid != cpuid) { /* Handle CPU migration. */
/* We expect any domain to clear the SYNC bit each
time it switches in a new task, so that preemptions
and/or CPU migrations (in the SMP case) over the
ISR do not lock out the log syncer for some
indefinite amount of time. In the Linux case,
schedule() handles this (see kernel/sched.c). For
this reason, we don't bother clearing it here for
the source CPU in the migration handling case,
since it must have scheduled another task in by
now. */
cpuid = _cpuid;
cpudata = &ipd->cpudata[cpuid];
__set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
}
}
#endif /* CONFIG_SMP */
__clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_unstall(ipd, cpuid);
}
}

done:
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}

==============================================================
kernel/ipipe-core.c

static inline void __ipipe_run_irqtail(void)
{
asmlinkage void __ipipe_call_irqtail(void);
unsigned long pending;

__builtin_bfin_csync();

pending = *pIPEND;
if (pending & 0x8000) {

/----------------------------
IVG15 is pending - is an interrupt pending there, or are we in kernel
mode? (The kernel normally runs at IVG15.)
-----------------------------/

pending &= ~0x8010;

/------------------------------
clear the IVG15 bit (0x8010 also clears IPEND[4], the global
interrupt-disable bit)
---------------------------/

if (pending && (pending & (pending - 1)) == 0)
__ipipe_call_irqtail();
/--------------
See below:

Only one other interrupt level is active - no nested interrupts.
---------------/
}
}
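
The (pending & (pending - 1)) == 0 test above is the standard "at most
one bit set" trick; a standalone check:

#include <stdio.h>

/* Nonzero iff exactly one bit of x is set (one active interrupt level). */
static int single_bit(unsigned long x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
	printf("%d\n", single_bit(0x0020));	/* one level active:  1 */
	printf("%d\n", single_bit(0x0028));	/* nested levels:     0 */
	printf("%d\n", single_bit(0));		/* nothing active:    0 */
	return 0;
}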

===============================================
ENTRY(___ipipe_call_irqtail)

r0.l = 1f;
r0.h = 1f;
reti = r0;
rti
/-------

return from interrupt and continue here
---------/
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
p0.l = ___ipipe_irq_tail_hook;
p0.h = ___ipipe_irq_tail_hook;
p0 = [p0] ;
sp += -12 ;
call (p0) ;
sp += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];

/------------
Call ipipe_irq_tail_hook
asm-blackfin/system.h:

static inline int xnarch_init (void)

{
int err;

__ipipe_irq_tail_hook = (unsigned long)&xnpod_schedule_deferred;

...
}

------------/

[--sp] = reti;
reti = [sp++]; /* IRQs are off. */

/-----------
Save RETI, and disable interrupt nesting:

Instructions that access the RETI register have an implicit side
effect: reading the RETI register enables interrupt nesting, and
writing to it disables nesting again.

------------/

r0.h = 3f;
r0.l = 3f;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync;
r0 = 0x401f;
sti r0;
/----------------------------------
enable interrupts: only IVG14 is enabled -- ?? Why do the operation
below?
----------------------------------/

raise 14;

[--sp] = reti; /* IRQs on. */
2:
jump 2b /* Likely paranoid. */
3:
sp += 4; /* Discard saved RETI */
r0.h = _evt14_softirq;
r0.l = _evt14_softirq;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync

/* Restore EVT14 */
p0.l = _irq_flags;
p0.h = _irq_flags;
r0 = [p0];
sti r0
rts;

==========================================================
#ifdef __KERNEL__

void xnpod_schedule_deferred (void)

{
if (nkpod && xnsched_resched_p())
xnpod_schedule();
}

#endif /* __KERNEL__ */

===========================================================

IPIPE - 1 - resend


1. Grab interrupts: interrupt.S

In __common_int_entry:

#ifdef CONFIG_IPIPE
call ___ipipe_grab_irq
SP += 12;
cc = r0 == 0;
if cc jump common_restore_context;
#else /* CONFIG_IPIPE */
call _do_irq;
SP += 12;
#endif /* CONFIG_IPIPE */
call return_from_int;
common_restore_context:
RESTORE_CONTEXT
rti;

/--------------------------------
Note: if __ipipe_grab_irq() returns 0, return_from_int is not called;
the context is just restored.
---------------------------------/

2. asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
unsigned long sic_status;
ipipe_declare_cpuid;
int irq;

/--------------------------------------------------------
"struct ivgx {
/* irq number for request_irq, available in mach-bf533/irq.h */
int irqno;
/* corresponding bit in the SIC_ISR register */
int isrflag;
} ivg_table[NR_PERI_INTS];

struct ivg_slice {
/* position of first irq in ivg_table for given ivg */
struct ivgx *ifirst;
struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];"

ivg7_13 is a table that stores the mapping from peripheral interrupts
to the core interrupts IVG7-13.

ivg_table holds the "IRQ number" and "position in SIC_ISR" of the
peripheral interrupts.
------------------------------------/

if (likely(vec == EVT_IVTMR_P)) {
irq = IRQ_CORETMR;
goto handle_irq;
}

/-----------------------------------------
The core timer interrupt is the most likely one to happen
-----------------------------------------/

__builtin_bfin_ssync();
sic_status = *pSIC_IMASK & *pSIC_ISR;

/----------------------------------------

sic_status: system interrupts that have fired and are not masked.

------------------------------------------/

for(;; ivg++) {
if (ivg >= ivg_stop) {
num_spurious++;
return 0;
}
else if (sic_status & ivg->isrflag)
break;

/---------------------------------------
Find the system interrupt that triggered "vec"
----------------------------------------/
}

irq = ivg->irqno;
/---------------
How irq is defined:

See irq.h - peripheral interrupt IRQ numbers start from 7, in the same
order as SIC_ISR.
----------------/

ipipe_load_cpuid();

if (irq == IRQ_SYSTMR) {
*pTIMER_STATUS = 1; /* Latch TIMIL0 */
/* for update_process_times() */
__ipipe_tick_regs[cpuid].ipend = regs->ipend;
}
/--------------
IRQ_SYSTMR is a peripheral interrupt, while the core timer is a core
interrupt. Why does it need regs->ipend here?
-----------------/

handle_irq:

__ipipe_handle_irq(irq, regs);

return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
!test_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status));

/--------------
If the current domain is the Linux (root) domain and Linux is willing
to handle the interrupt, return 1. That means this interrupt is then
handled the same way as under plain Linux.
---------------/

}
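
A standalone sketch of the ivg scan above. The structure layout is
modeled on the quoted kernel snippet; the table contents and field
widths are invented:

#include <stdio.h>

struct ivgx {
	int irqno;			/* irq number for request_irq */
	unsigned long isrflag;		/* corresponding SIC_ISR bit */
};

/* Walk the slice of ivg_table for this core vector and return the
 * peripheral irq whose SIC_ISR bit is set, or -1 if spurious. */
static int find_irq(struct ivgx *ivg, struct ivgx *ivg_stop,
		    unsigned long sic_status)
{
	for (;; ivg++) {
		if (ivg >= ivg_stop)
			return -1;
		if (sic_status & ivg->isrflag)
			return ivg->irqno;
	}
}

int main(void)
{
	struct ivgx tbl[] = { { 7, 1UL << 0 }, { 8, 1UL << 1 } };

	/* Pretend *pSIC_IMASK & *pSIC_ISR has bit 1 set. */
	printf("%d\n", find_irq(tbl, tbl + 2, 1UL << 1)); /* prints 8 */
	return 0;
}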

System call in Blackfin - Cont' - resend


================================================
arch/blackfin/mach-common/ints-priority-sc.c:

int init_arch_irq()
{

#ifndef CONFIG_KGDB
*pEVT0 = evt_emulation;
#endif
*pEVT2 = evt_evt2;
*pEVT3 = trap;
*pEVT5 = evt_ivhw;
*pEVT6 = evt_timer;
*pEVT7 = evt_evt7;
*pEVT8 = evt_evt8;
*pEVT9 = evt_evt9;
*pEVT10 = evt_evt10;
*pEVT11 = evt_evt11;
*pEVT12 = evt_evt12;
*pEVT13 = evt_evt13;
*pEVT14 = evt14_softirq;
*pEVT15 = evt_system_call;
__builtin_bfin_csync();
}
=======================================================
arch/blackfin/mach-common/entry.S:

ENTRY(_trap) /* Exception: 4th entry into system event table (supervisor mode) */
	/* Since the kernel stack can be anywhere, it's not guaranteed to be
	 * covered by a CPLB. Switch to an exception stack; use RETN as a
	 * scratch register (for want of a better option).
	 */
/----------------------------------
#define ENTRY(name) .globl name; ALIGN; name:
-----------------------------------/

retn = sp;
/-------------------------------
Use RETN just as a tmp register?
--------------------------------/

sp.l = exception_stack_top;
sp.h = exception_stack_top;
/------------------------------------
In entry.S
/* Put this in the kernel data section - that should always be covered by
 * a CPLB.
 */
exception_stack:
.rept 1024
.long 0;
.endr
exception_stack_top:
----------------------------------------/

/* Try to deal with syscalls quickly. */
[--sp] = ASTAT;
[--sp] = (R7:6, P5:4);
r7 = SEQSTAT; /* reason code is in bit 5:0 */
r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6;
p5.h = extable;
p5.l = extable;
p4 = r7;
p5 = p5 + (p4 << 2);
p4 = [p5];
jump (p4);
/---------------------------------------------
What is the relationship between extable and the exception handler
entries? When does extable get initialized?
-----------------------------------------------/

badsys:
r7 = -ENOSYS; /* signextending enough */
[sp + PT_R0] = r7; /* return value from system call */
jump syscall_really_exit;

ENTRY(ex_syscall)
(R7:6,P5:4) = [sp++];
ASTAT = [sp++];
raise 15; /* invoked by TRAP #0, for sys call */
sp = retn;
rtx

==============================================
mach-comm/interrupt.S

/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
call system_call;
jump common_restore_context;
/------------------------------------
Note: It does not go through common_int_entry:
-------------------------------------/

==============================================
entry.S:

ENTRY(system_call)
/* Store IPEND */
p2.l = lo(IPEND);
p2.h = hi(IPEND);
csync;
r0 = [p2];
[sp + PT_IPEND] = r0;

/* Store RETS for now */
r0 = rets;
/--------------------------------
call system_call;
jump common_restore_context;
--------------------------------/

[sp + PT_RESERVED] = r0;
/* Set the stack for the current process */
r7 = sp;
r6.l = lo(ALIGN_PAGE_MASK);
r6.h = hi(ALIGN_PAGE_MASK);
r7 = r7 & r6; /*thread_info*/
p2 = r7;
p2 = [p2];

[p2+(TASK_THREAD+THREAD_KSP)] = sp;
#ifdef CONFIG_IPIPE
r0 = sp;
SP += -12;
call ___ipipe_syscall_root;
SP += 12;
cc = r0 == 1;
/--------------------------------
Should not pass to Linux, no tail work
------------------------------------/
if cc jump syscall_really_exit;
cc = r0 == -1;
/-------------------------------------
Should not pass to Linux, tail work (handling signal)
-------------------------------------/
if cc jump resume_userspace;
/-----------------------------------------
Should pass to Linux
------------------------------------------/
r3 = [sp + PT_R3];
r4 = [sp + PT_R4];
p0 = [sp + PT_ORIG_P0];
#endif /* CONFIG_IPIPE */

/* Check the System Call */
r7 = __NR_syscall;
/*System call number is passed in P0 */
r5 = p0;
cc = r5 < r7;
if ! cc jump badsys;

/------------------------------------
Check whether the sys call is valid or not.
--------------------------------------/

/* Execute the appropriate system call */

p4 = r5;
p5.l = sys_call_table;
p5.h = sys_call_table;
p5 = p5 + (p4 << 2);
r0 = [sp + PT_R0];
r1 = [sp + PT_R1];
r2 = [sp + PT_R2];
p5 = [p5];

/* are we tracing syscalls?*/
r7 = sp;
r6.l = lo(ALIGN_PAGE_MASK);
r6.h = hi(ALIGN_PAGE_MASK);
r7 = r7 & r6;
/----------------------------
Get thread_info:
see asm-blackfin/thread_info.h:
/* Given a task stack pointer, you can find its task structure
 * just by masking it to the 8K boundary.
*/
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("%0 = sp;": "=&d"(ti):
);
return (struct thread_info *)((long)ti & ~8191UL);
}
-----------------------------/
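
A standalone sketch of the masking trick quoted above, assuming 8 KB
kernel stacks as in the comment (the sample stack pointer is made up):

#include <stdio.h>

#define THREAD_SIZE 8192UL

struct thread_info {
	unsigned long flags;
};

/* Any address inside the stack, masked down to the 8 KB boundary,
 * yields the thread_info at the stack's base. */
static struct thread_info *ti_of_sp(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

int main(void)
{
	unsigned long sp = 0x00803f2cUL;	/* hypothetical kernel SP */

	printf("%p\n", (void *)ti_of_sp(sp));	/* prints 0x802000 */
	return 0;
}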
p2 = r7;
r7 = [p2+TI_FLAGS];
CC = BITTST(r7,TIF_SYSCALL_TRACE);
if CC JUMP sys_trace;

[--sp] = r4;
[--sp] = r3;
SP += -12;
call (p5);
SP += 20;
[sp + PT_R0] = r0;
/-----------------------------
Call the real syscall: r0 is the return value??
-----------------------------/

resume_userspace:
r7 = sp;
r4.l = lo(ALIGN_PAGE_MASK);
r4.h = hi(ALIGN_PAGE_MASK);
r7 = r7 & r4; /*thread_info->flags*/
p5 = r7;
resume_userspace_1:
/* Disable interrupts. */
[--sp] = reti;
reti = [sp++];

/----------------------------------------------
To disable nested interrupts - IRQ15.

Instructions that access the RETI register have an implicit side
effect: reading the RETI register enables interrupt nesting, and
writing to it disables nesting again.
------------------------------------------------/

r7 = [p5 + TI_FLAGS];
r4.l = lo(_TIF_WORK_MASK);
r4.h = hi(_TIF_WORK_MASK);
r7 = r7 & r4;

syscall_resched:
cc = BITTST(r7, TIF_NEED_RESCHED);

/-----------------------
The only exit for "syscall_resched"
------------------------/
if !cc jump syscall_sigpending;

/--------------------------------------
OK. Need reschedule
-----------------------------------------/
/* Reenable interrupts. */
[--sp] = reti;
r0 = [sp++];

SP += -12;
call _schedule;
SP += 12;

jump resume_userspace_1;
/-----------------------------------
Check again - until there is no need to reschedule
------------------------------------/

syscall_sigpending:
cc = BITTST(r7, TIF_RESTORE_SIGMASK);
if cc jump syscall_do_signals;
cc = BITTST(r7, TIF_SIGPENDING);
if !cc jump syscall_really_exit;
syscall_do_signals:
/* Reenable interrupts. */
[--sp] = reti;
r0 = [sp++];

r0 = sp;
SP += -12;
call _do_signal;
SP += 12;

syscall_really_exit:
#ifdef CONFIG_IPIPE
[--sp] = reti;
r5 = [sp++];
#endif /* CONFIG_IPIPE */
r5 = [sp + PT_RESERVED];
rets = r5;
rts;
/-----------------------------
Return to common_restore_context:

Then "rti" to exit from IRQ15, then "rtx" to exit from exception.

"The difference between a JUMP and a CALL is that
a CALL automatically loads the return address into the RETS register.
The return address is the next sequential address after the CALL
instruction."

-------------------------------/

sys_trace:
[--sp] = r3;
[--sp] = r2;
[--sp] = r1;
[--sp] = r0;
[--sp] = p5;
[--sp] = p2;
[--sp] = p1;
[--sp] = p0;
r1 = 0;
call _syscall_trace;
p0 = [sp++];
p1 = [sp++];
p2 = [sp++];
p5 = [sp++];
r0 = [sp++];
r1 = [sp++];
r2 = [sp++];
r3 = [sp++];

[--sp] = r4;
[--sp] = r3;
SP += -12;
call (p5);
SP += 20;
[sp + PT_R0] = r0;

[--sp] = r3;
[--sp] = r2;
[--sp] = r1;
[--sp] = r0;
[--sp] = p5;
[--sp] = p2;
[--sp] = p1;
[--sp] = p0;
r1 = 1;
call _syscall_trace;
p0 = [sp++];
p1 = [sp++];
p2 = [sp++];
p5 = [sp++];
r0 = [sp++];
r1 = [sp++];
r2 = [sp++];
r3 = [sp++];

jump resume_userspace;

ipipe-root.c
====================================================
asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
/---------------------------------------
r0 = sp; -- kernel stack
----------------------------------------/

ipipe_declare_cpuid;
unsigned long flags;

/*
* This routine either returns:
* 0 -- if the syscall is to be passed to Linux;
* 1 -- if the syscall should not be passed to Linux, and no
* tail work should be performed;
* -1 -- if the syscall should not be passed to Linux but the
* tail work has to be performed (for handling signals etc).
*/

/-----------------------------------------------------
#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT)
#define IPIPE_EVENT_SCHEDULE (IPIPE_FIRST_EVENT + 1)
#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 2)
#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 3)
#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 4)
-------------------------------------------------------/

if (__ipipe_event_pipelined_p(IPIPE_EVENT_SYSCALL) &&
__ipipe_dispatch_event(IPIPE_EVENT_SYSCALL,regs) > 0) {

/------------------------------------------
Do not propagate the event to Linux
-------------------------------------------/
/*
* We might enter here over a non-root domain and exit
* over the root one as a result of the syscall
* (i.e. by recycling the register set of the current
* context across the migration), so we need to fixup
* the interrupt flag upon return too, so that
* __ipipe_unstall_iret_root() resets the correct
* stall bit on exit.
*/
if (ipipe_current_domain == ipipe_root_domain && !
in_atomic()) {

/-------------------------------------------
???
--------------------------------------------/

/*
* Sync pending VIRQs before _TIF_NEED_RESCHED
* is tested.
*/
ipipe_lock_cpu(flags);
if
((ipipe_root_domain->cpudata[cpuid].irq_pending_hi &
IPIPE_IRQMASK_VIRT) != 0)
__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
ipipe_unlock_cpu(flags);
return -1;
}
return 1;
}

return 0;
}

kernel/ipipe/core.c
==================================================================
/* __ipipe_dispatch_event() -- Low-level event dispatcher. */

int fastcall __ipipe_dispatch_event (unsigned event, void *data)
{
struct ipipe_domain *start_domain, *this_domain, *next_domain;
struct list_head *pos, *npos;
unsigned long flags;
ipipe_declare_cpuid;
int propagate = 1;

ipipe_lock_cpu(flags);

start_domain = this_domain = ipipe_percpu_domain[cpuid];

list_for_each_safe(pos,npos,&__ipipe_pipeline) {
/-----------------------------------------
Starting from the highest priority domain
------------------------------------------/

next_domain = list_entry(pos,struct
ipipe_domain,p_link);

/*
* Note: Domain migration may occur while running
* event or interrupt handlers, in which case the
* current register set is going to be recycled for a
* different domain than the initiating one. We do
* care for that, always tracking the current domain
* descriptor upon return from those handlers.
*/
if (next_domain->evhand[event] != NULL) {
/--------------------------------------------
#define rthal_catch_taskexit(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_EXIT,hdlr)
#define rthal_catch_sigwake(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_SIGWAKE,hdlr)
#define rthal_catch_schedule(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_SCHEDULE,hdlr)
#define rthal_catch_setsched(hdlr) ipipe_catch_event(&rthal_domain,IPIPE_EVENT_SETSCHED,hdlr)
#define rthal_catch_losyscall(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_SYSCALL,hdlr)
#define rthal_catch_hisyscall(hdlr) ipipe_catch_event(&rthal_domain,IPIPE_EVENT_SYSCALL,hdlr)
#define rthal_catch_exception(ex,hdlr) ipipe_catch_event(&rthal_domain,ex|IPIPE_EVENT_SELF,hdlr)

---------------------------------------------/

ipipe_percpu_domain[cpuid] = next_domain;
ipipe_unlock_cpu(flags);
propagate = !
next_domain->evhand[event](event,start_domain,data);
ipipe_lock_cpu(flags);
if (ipipe_percpu_domain[cpuid] != next_domain)
this_domain =
ipipe_percpu_domain[cpuid];
}

if (next_domain != ipipe_root_domain && /* NEVER sync
the root stage
here. */
next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
!
test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status)) {
ipipe_percpu_domain[cpuid] = next_domain;
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
ipipe_load_cpuid();
if (ipipe_percpu_domain[cpuid] != next_domain)
this_domain =
ipipe_percpu_domain[cpuid];
}

ipipe_percpu_domain[cpuid] = this_domain;

if (next_domain == this_domain || !propagate)
break;
}

ipipe_unlock_cpu(flags);

return !propagate;
}


kernel start up -blackfin arch - resend


mach-bf537/head.S:

.text

ENTRY(__start)
ENTRY(__stext)
/* R0: argument of command line string, passed from uboot, save it */
R7 = R0;
/* Set the SYSCFG register */
R0 = 0x36;
SYSCFG = R0; /* Enable Cycle Counter and Nesting Of Interrupts (3rd bit) */
/-------------------------------------------------------
SYSCFG: Only three settings

SYSCFG[2]: SNEN - self-nesting interrupt enable
SYSCFG[1]: CCEN - cycle count
SYSCFG[0]: SSSTEP - Supervisor Single Step

--------------------------------------------------------/

R0 = 0;

/*Clear Out All the data and pointer Registers*/
R1 = R0;
R2 = R0;
R3 = R0;
R4 = R0;
R5 = R0;
R6 = R0;

P0 = R0;
P1 = R0;
P2 = R0;
P3 = R0;
P4 = R0;
P5 = R0;

LC0 = r0;
LC1 = r0;
L0 = r0;
L1 = r0;
L2 = r0;
L3 = r0;

/*Clear Out All the DAG Registers*/
B0 = r0;
B1 = r0;
B2 = r0;
B3 = r0;

I0 = r0;
I1 = r0;
I2 = r0;
I3 = r0;

M0 = r0;
M1 = r0;
M2 = r0;
M3 = r0;

/* Turn off the icache */
p0.l = (IMEM_CONTROL & 0xFFFF);
p0.h = (IMEM_CONTROL >> 16);
R1 = [p0];
R0 = ~ENICPLB;
R0 = R0 & R1;
/--------------------------
Disable ICPLB
---------------------------/

/* Anomaly 05000125 */
CLI R2;
SSYNC;
[p0] = R0;
SSYNC;
STI R2;

/* Turn off the dcache */
p0.l = (DMEM_CONTROL & 0xFFFF);
p0.h = (DMEM_CONTROL >> 16);
R1 = [p0];
R0 = ~ENDCPLB;
R0 = R0 & R1;
/----------------------------
Disable DCPLB
------------------------------/

/* Anomaly 05000125 */
CLI R2;
SSYNC;
[p0] = R0;
SSYNC;
STI R2;

/* Initialise General-Purpose I/O Modules on BF537 */
/* Rev 0.0 Anomaly 05000212 - PORTx_FER, PORT_MUX registers do not
 * accept "writes" correctly: */
p0.h = hi(PORT_MUX);
p0.l = lo(PORT_MUX);
R0.L = W[P0]; //Read
SSYNC;
R0 = (PGDE_UART | PFTE_UART)(Z);
W[P0] = R0.L; //Write
SSYNC;
W[P0] = R0.L; /* Enable both UARTS */
SSYNC;

p0.h = hi(PORTF_FER);
p0.l = lo(PORTF_FER);
R0.L = W[P0]; //Read
SSYNC;
R0 = 0x000F(Z);
W[P0] = R0.L; //Write
SSYNC;
W[P0] = R0.L; /* Enable peripheral function of PORTF for UART0 and UART1 */
SSYNC;

p0.h = hi(EMAC_SYSTAT);
p0.l = lo(EMAC_SYSTAT);
R0.h = 0xFFFF; /* Clear EMAC Interrupt Status bits */
R0.l = 0xFFFF;
[P0] = R0;
SSYNC;

/*Initialise UART*/
p0.h = hi(UART_LCR);
p0.l = lo(UART_LCR);
r0 = 0x0(Z);
w[p0] = r0.L; /* To enable DLL writes */
ssync;

p0.h = hi(UART_DLL);
p0.l = lo(UART_DLL);
r0 = 0x00(Z);
w[p0] = r0.L;
ssync;

p0.h = hi(UART_DLH);
p0.l = lo(UART_DLH);
r0 = 0x00(Z);
w[p0] = r0.L;
ssync;

p0.h = hi(UART_GCTL);
p0.l = lo(UART_GCTL);
r0 = 0x0(Z);
w[p0] = r0.L; /* To enable UART clock */
ssync;

/* Initialize stack pointer */
sp.l = lo(INITIAL_STACK);
sp.h = hi(INITIAL_STACK);
fp = sp;
usp = sp;

/*Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM*/
call _bf53x_relocate_l1_mem;
#if CONFIG_BFIN_KERNEL_CLOCK
call start_dma_code;
#endif
/* Code for initializing Async memory banks */

p2.h = hi(EBIU_AMBCTL1);
p2.l = lo(EBIU_AMBCTL1);
r0.h = hi(AMBCTL1VAL);
r0.l = lo(AMBCTL1VAL);
[p2] = r0;
ssync;

p2.h = hi(EBIU_AMBCTL0);
p2.l = lo(EBIU_AMBCTL0);
r0.h = hi(AMBCTL0VAL);
r0.l = lo(AMBCTL0VAL);
[p2] = r0;
ssync;

p2.h = hi(EBIU_AMGCTL);
p2.l = lo(EBIU_AMGCTL);
r0 = AMGCTLVAL;
w[p2] = r0;
ssync;
call _real_start;

/* This section keeps the processor in supervisor mode
* during kernel boot. Switches to user mode at end of boot.
* See page 3-9 of Hardware Reference manual for documentation.
*/

/* EVT15 = _real_start */

p0.l = lo(EVT15);
p0.h = hi(EVT15);
p1.l = _real_start;
p1.h = _real_start;
[p0] = p1;
csync;

p0.l = lo(IMASK);
p0.h = hi(IMASK);
p1.l = IMASK_IVG15;
p1.h = 0x0;
[p0] = p1;
csync;

raise 15;
p0.l = WAIT_HERE;
p0.h = WAIT_HERE;
reti = p0;
rti;

WAIT_HERE:
jump WAIT_HERE;

ENTRY(_real_start)
[ -- sp ] = reti;
p0.l = lo(WDOG_CTL);
p0.h = hi(WDOG_CTL);
r0 = 0xAD6(z);
w[p0] = r0; /* watchdog off for now */
ssync;

/* Code update for BSS size == 0
* Zero out the bss region.
*/

p1.l = ___bss_start;
p1.h = ___bss_start;
p2.l = ___bss_stop;
p2.h = ___bss_stop;
r0 = 0;
p2 -= p1;
lsetup (_clear_bss, _clear_bss ) lc0 = p2;
_clear_bss:
B[p1++] = r0;

/* In case there is a NULL pointer reference
* Zero out region before stext
*/

p1.l = 0x0;
p1.h = 0x0;
r0.l = __stext;
r0.h = __stext;
r0 = r0 >> 1;
p2 = r0;
r0 = 0;
lsetup (_clear_zero, _clear_zero ) lc0 = p2;
_clear_zero:
W[p1++] = r0;

/* pass the uboot arguments to the global value command line */
R0 = R7;
call _cmdline_init;

p1.l = __rambase;
p1.h = __rambase;
r0.l = __sdata;
r0.h = __sdata;
[p1] = r0;

p1.l = __ramstart;
p1.h = __ramstart;
p3.l = ___bss_stop;
p3.h = ___bss_stop;

r1 = p3;
[p1] = r1;

r0.l = lo(RAM_END);
r0.h = hi(RAM_END);
p1.l = __ramend;
p1.h = __ramend;
[p1] = r0;

/*
* load the current thread pointer and stack
*/
r1.l = _init_thread_union;
r1.h = _init_thread_union;

r2.l = 0x2000;
r2.h = 0x0000;
r1 = r1 + r2;
sp = r1;
usp = sp;
fp = sp;
call _start_kernel;
_exit:
jump.s _exit;

xenomai initialization - resend

modules.c:

int __init __xeno_sys_init(void)

include/asm-blackfin/system.h:

static inline int xnarch_init (void)

{
int err;

/* Register the irq_tail_hook - do rescheduling */

__ipipe_irq_tail_hook = (unsigned long)&xnpod_schedule_deferred;

/* Register Xenomai domain */
err = rthal_init();

if (err)
return err;

#ifdef CONFIG_SMP
/* The HAL layer also sets the same CPU affinity so that both
modules keep their execution sequence on SMP boxen. */
set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

err = xnarch_calibrate_sched();

if (err)
return err;

xnarch_escalation_virq = rthal_alloc_virq();

if (xnarch_escalation_virq == 0)
return -ENOSYS;

rthal_virtualize_irq(&rthal_domain,
xnarch_escalation_virq,
(rthal_irq_handler_t)&xnpod_schedule_handler,
NULL,
NULL,
IPIPE_HANDLE_MASK | IPIPE_WIRED_MASK);

xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

return 0;
}


Saturday, August 26, 2006


GNU C extensions in the kernel


===========================
GNU C extensions used by the Linux kernel
===========================

GNU CC is a powerful cross-platform C compiler. It provides many
extensions to the C language, with strong support for optimization,
object-code layout, stricter checking, and more. This article refers
to C with the GNU extensions as GNU C.

The Linux kernel code uses GNU C extensions heavily, to the point that
GNU CC is the only compiler able to build the kernel; at times a
special version of GNU CC was even required. This article is a summary
of the GNU C extensions used by the Linux kernel. Hopefully, when you
run into syntax or semantics you do not understand while reading
kernel source, you can find a first answer here; see gcc.info for more
detail. The examples are taken from Linux 2.4.18.


Statement expressions
==========

GNU C treats a compound statement enclosed in parentheses as an
expression, called a statement expression. It may appear anywhere an
expression is allowed, and inside it you can use loops, local
variables, and other things normally allowed only in a compound
statement. For example:

++++ include/linux/kernel.h
159: #define min_t(type,x,y) \
160:	({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
++++ net/ipv4/tcp_output.c
654: int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));

The last statement of the compound statement should be an expression;
its value becomes the value of the whole statement expression. What is
defined here is a safe "minimum" macro; in standard C it is usually
written as:

#define min(x,y) ((x) < (y) ? (x) : (y))

This definition evaluates each of x and y twice, so when an argument
has side effects it produces a wrong result. The statement expression
evaluates each argument only once, avoiding the possible error.
Statement expressions are mostly used in macro definitions.
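
A small standalone demo of the double-evaluation bug and the
statement-expression fix (GNU C only; the macro names are mine, not
the kernel's):

#include <stdio.h>

#define MIN_BAD(x,y)  ((x) < (y) ? (x) : (y))
#define MIN_GOOD(x,y) ({ typeof(x) _x = (x); typeof(y) _y = (y); \
			 _x < _y ? _x : _y; })

int main(void)
{
	int a = 1;

	printf("%d\n", MIN_BAD(a++, 5));  /* a++ evaluated twice: prints 2 */
	a = 1;
	printf("%d\n", MIN_GOOD(a++, 5)); /* evaluated once: prints 1 */
	return 0;
}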


Typeof
======

Using the macro from the previous section requires knowing the type of
the arguments. With typeof we can define a more generic macro that
does not need to know the argument types in advance, for example:

++++ include/linux/kernel.h
141: #define min(x,y) ({ \
142:	const typeof(x) _x = (x); \
143:	const typeof(y) _y = (y); \
144:	(void) (&_x == &_y); \
145:	_x < _y ? _x : _y; })

Here typeof(x) is the type of the value of x. Line 142 defines a local
variable _x with the same type as x and initializes it to x. Note that
line 144 exists to check that the arguments x and y have the same type
(comparing pointers to different types draws a compiler warning).
typeof can be used anywhere a type can be used, and usually appears in
macro definitions.


Zero-length arrays
==========

GNU C allows zero-length arrays, which are very useful when defining
the header structure of a variable-length object. For example:

++++ include/linux/minix_fs.h
85: struct minix_dir_entry {
86: __u16 inode;
87: char name[0];
88: };

The last element of the structure is defined as a zero-length array,
which occupies no space in the structure. In standard C the array
would have to be given length 1, and computing the object size at
allocation time becomes more awkward.
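
A standalone sketch of allocating such a variable-length object; the
structure is modeled on, not taken from, minix_dir_entry:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dir_entry {
	unsigned short inode;
	char name[0];		/* occupies no space in the struct */
};

int main(void)
{
	const char *name = "README";
	/* sizeof(*de) covers only the fixed header; add room for the name. */
	struct dir_entry *de = malloc(sizeof(*de) + strlen(name) + 1);

	de->inode = 42;
	strcpy(de->name, name);
	printf("%u %s\n", de->inode, de->name);	/* prints: 42 README */
	free(de);
	return 0;
}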


Variadic macros
==========

In GNU C a macro can accept a variable number of arguments, just like
a function, for example:

++++ include/linux/kernel.h
110: #define pr_debug(fmt,arg...) \
111:	printk(KERN_DEBUG fmt,##arg)

Here arg stands for the remaining arguments, of which there may be
zero or more. Those arguments, together with the commas between them,
form the value of arg, which replaces arg when the macro is expanded.
For example:

pr_debug("%s:%d",filename,line)

expands to

printk("<7>" "%s:%d", filename, line)

The reason for using ## is to handle the case where arg matches no
argument at all; the value of arg is then empty, and in that special
case the GNU C preprocessor drops the comma before the ##, so that

pr_debug("success!\n")

expands to

printk("<7>" "success!\n")

Note that there is no trailing comma.


Designated initializers
========

Standard C requires the initializer values of an array or structure
variable to appear in a fixed order. In GNU C, by specifying the index
or the field name, initializer values may appear in any order. To
specify an array index, write '[INDEX] =' before the value; to cover a
range, use the form '[FIRST ... LAST] ='. For example:

+++++ arch/i386/kernel/irq.c
1079: static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };

initializes all elements of the array to ~0UL, which can be seen as a
shorthand.

To specify a structure field, write 'FIELDNAME:' before the value, for
example:

++++ fs/ext2/file.c
41: struct file_operations ext2_file_operations = {
42: llseek: generic_file_llseek,
43: read: generic_file_read,
44: write: generic_file_write,
45: ioctl: ext2_ioctl,
46: mmap: generic_file_mmap,
47: open: generic_file_open,
48: release: ext2_release_file,
49: fsync: ext2_sync_file,
50 };

This initializes the llseek member of the structure
ext2_file_operations to generic_file_llseek, the read member to
generic_file_read, and so on. I think this is one of the best features
among the GNU C extensions: when the definition of the structure
changes so that the member offsets move, this style of initialization
still keeps the named members correct. Members that do not appear in
the initializer default to 0.


Case ranges
=========

GNU C allows a single case label to specify a contiguous range of
values, for example:

++++ arch/i386/kernel/irq.c
1062: case '0' ... '9': c -= '0'; break;
1063: case 'a' ... 'f': c -= 'a'-10; break;
1064: case 'A' ... 'F': c -= 'A'-10; break;

case '0' ... '9':

is equivalent to

case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':


Special attributes of declarations
==============

GNU C allows functions, variables, and types to be declared with
special attributes, to allow manual code optimization and more careful
code checking. To specify an attribute, write

__attribute__ (( ATTRIBUTE ))

after the declaration, where ATTRIBUTE is the attribute specification;
multiple attributes are separated by commas. GNU C supports more than
a dozen attributes; the most common ones are described here:

* noreturn

The noreturn attribute is used on functions and says the function
never returns. This lets the compiler generate slightly more optimized
code, and above all it suppresses unnecessary warnings such as
uninitialized variables. For example:

++++ include/linux/kernel.h
47: # define ATTRIB_NORET __attribute__((noreturn)) ....
61: asmlinkage NORET_TYPE void do_exit(long error_code)
ATTRIB_NORET;

* format (ARCHETYPE, STRING-INDEX, FIRST-TO-CHECK)

The format attribute is used on functions and says the function takes
printf-, scanf-, or strftime-style arguments. The easiest mistake to
make with such functions is a format string that does not match the
arguments; the format attribute lets the compiler check the argument
types against the format string. For example:

++++ include/linux/kernel.h
89: asmlinkage int printk(const char * fmt, ...)
90: __attribute__ ((format (printf, 1, 2)));

says that the first argument is the format string, and that the
arguments are checked against it starting from the second.

* unused

The unused attribute is used on functions and variables and says they
may be unused; it keeps the compiler from issuing warnings about them.

* section ("section-name")

The section attribute is used on functions and variables. The compiler
normally places functions in the .text section and variables in .data
or .bss; with the section attribute, the compiler places the function
or variable in the specified section instead. For example:

++++ include/linux/init.h
78: #define __init __attribute__ ((__section__ (".text.init")))
79: #define __exit __attribute__ ((unused, __section__(".text.exit")))
80: #define __initdata __attribute__ ((__section__ (".data.init")))
81: #define __exitdata __attribute__ ((unused, __section__ (".data.exit")))
82: #define __initsetup __attribute__ ((unused,__section__ (".setup.init")))
83: #define __init_call __attribute__ ((unused,__section__ (".initcall.init")))
84: #define __exit_call __attribute__ ((unused,__section__ (".exitcall.exit")))

The linker can arrange code or data from the same section together.
The Linux kernel is very fond of this technique; for example, the
system initialization code is placed in a separate section whose
memory can be freed once initialization is finished.

* aligned (ALIGNMENT)

The aligned attribute is used on variables and on structure or union
types; it specifies the alignment of the variable, structure field,
structure, or union, in bytes. For example:

++++ include/asm-i386/processor.h
294: struct i387_fxsave_struct {
295: unsigned short cwd;
296: unsigned short swd;
297: unsigned short twd;
298: unsigned short fop;
299: long fip;
300: long fcs;
301: long foo;
......
308: } __attribute__ ((aligned (16)));

says that variables of this structure type are aligned to 16 bytes.
The compiler normally chooses a suitable alignment on its own; an
explicit alignment is usually due to architectural constraints,
optimization, and similar reasons.

* packed

The packed attribute is used on variables and types. On a variable or
structure field it means use the smallest possible alignment; on an
enum, struct, or union type it means the type should use the least
possible memory. For example:

++++ include/asm-i386/desc.h
51: struct Xgt_desc_struct {
52: unsigned short size;
53: unsigned long address __attribute__((packed));
54: };

The address field is allocated immediately after size. The packed
attribute is mostly used to define structures that match hardware
layouts, so that no alignment holes appear between the fields.


Current function name
==========

GNU CC predefines two identifiers that hold the name of the current
function: __FUNCTION__ holds the function's name as it appears in the
source, and __PRETTY_FUNCTION__ holds a language-flavored form of it.
In a C function the two names are the same; in a C++ function,
__PRETTY_FUNCTION__ includes extra information such as the function's
return type. The Linux kernel only uses __FUNCTION__.

++++ fs/ext2/super.c
98: void ext2_update_dynamic_rev(struct super_block *sb)
99: {
100: struct ext2_super_block *es = EXT2_SB(sb)->s_es;
101:
102: if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
103: return;
104:
105: ext2_warning(sb, __FUNCTION__,
106: "updating to rev %d because of new feature flag, "
107: "running e2fsck is recommended",
108: EXT2_DYNAMIC_REV);

Here __FUNCTION__ is replaced by the string "ext2_update_dynamic_rev".
Although __FUNCTION__ looks like __FILE__ from standard C, it is in fact
substituted by the compiler, unlike __FILE__, which is substituted by
the preprocessor.
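
A minimal sketch (whoami is a hypothetical function):

#include <stdio.h>

static void whoami(void)
{
	/* Prints "whoami"; the compiler substitutes the name. */
	printf("%s\n", __FUNCTION__);
}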


Built-in Functions
==================

GNU C provides a large number of built-in functions. Many are built-in
versions of standard C library functions, such as memcpy, and behave the
same as their library counterparts; this article does not cover those.
The remaining built-ins have names that usually start with __builtin.

* __builtin_return_address (LEVEL)

The built-in function __builtin_return_address returns the return
address of the current function or of one of its callers. The argument
LEVEL gives the number of stack frames to walk: 0 means the return
address of the current function, 1 means that of its caller, and so on.
For example:

++++ kernel/sched.c
437: printk(KERN_ERR "schedule_timeout: wrong timeout "
438: "value %lx from %p\n", timeout,
439: __builtin_return_address(0));
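
A minimal sketch (show_caller is hypothetical):

#include <stdio.h>

static void show_caller(void)
{
	/* Level 0: the address this function will return to,
	 * i.e. a location inside its caller. */
	printf("called from %p\n", __builtin_return_address(0));
}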

* __builtin_constant_p(EXP)

The built-in function __builtin_constant_p tests whether a value is a
compile-time constant: it returns 1 if the argument EXP is constant and
0 otherwise. For example:

++++ include/asm-i386/bitops.h
249: #define test_bit(nr,addr) \
250: (__builtin_constant_p(nr) ? \
251: constant_test_bit((nr),(addr)) : \
252: variable_test_bit((nr),(addr)))

Many computations and operations have a more optimized implementation
when an argument is constant. With the technique above, GNU C compiles
only the constant or only the non-constant version, depending on whether
the argument is a constant; generality is preserved, yet fully optimized
code is emitted whenever the argument is a compile-time constant.
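
A sketch of the same pattern with hypothetical names (fast_hash,
variable_hash); when x is a literal, the constant branch folds away
entirely at compile time:

static unsigned long variable_hash(unsigned long x);

#define fast_hash(x) \
	(__builtin_constant_p(x) ? (((x) * 2654435761UL) >> 16) \
	 : variable_hash(x))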

* __builtin_expect(EXP, C)

The built-in function __builtin_expect supplies the compiler with
branch-prediction information. Its return value is the value of the
integer expression EXP; C must be a compile-time constant. For example:

++++ include/linux/compiler.h
13: #define likely(x) __builtin_expect((x),1)
14: #define unlikely(x) __builtin_expect((x),0)
++++ kernel/sched.c
564: if (unlikely(in_interrupt())) {
565: printk("Scheduling in interrupt\n");
566: BUG();
567: }

The semantics are that EXP is expected to evaluate to C. The compiler
can use this to reorder blocks of code so that the program runs faster
in the expected case. The example above says that being in interrupt
context rarely happens, so the object code for lines 565-566 may be
placed far away, keeping the frequently executed object code compact.
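
A minimal sketch using __builtin_expect directly (process is a
hypothetical function):

int process(const char *buf)
{
	/* Tell GCC the NULL case is rare, so the error path can be
	 * moved out of the straight-line hot path. */
	if (__builtin_expect(buf == NULL, 0))
		return -1;

	return buf[0]; /* expected, common case */
}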

Sunday, August 20, 2006

System call in Blackfin - Cont'

================================================
arch/blackfin/mach-common/ints-priority-sc.c:

int init_arch_irq()
{

#ifndef CONFIG_KGDB
*pEVT0 = evt_emulation;

Friday, August 18, 2006

System call in Blackfin

================================================
arch/blackfin/mach-common/ints-priority-sc.c:

int init_arch_irq()
{

#ifndef CONFIG_KGDB
*pEVT0 = evt_emulation;

kernel start up -blackfin arch

mach-bf537/head.S:

.text

ENTRY(__start)
ENTRY(__stext)
/* R0: argument of command line string, passed from uboot, save it */
R7 = R0;
/* Set the SYSCFG register */
R0 = 0x36;
SYSCFG = R0; /*Enable Cycle Counter and Nesting Of Interrupts(3rd
Bit)*/
/-------------------------------------------------------
SYSCFG: Only three settings

SYSCFG[2]: SNEN - self-nesting interrupt enable
SYSCFG[1]: CCEN - cycle count
SYSCFG[0]: SSSTEP - Supervisor Single Step

--------------------------------------------------------/

R0 = 0;

/*Clear Out All the data and pointer Registers*/
R1 = R0;
R2 = R0;
R3 = R0;
R4 = R0;
R5 = R0;
R6 = R0;

P0 = R0;
P1 = R0;
P2 = R0;
P3 = R0;
P4 = R0;
P5 = R0;

LC0 = r0;
LC1 = r0;
L0 = r0;
L1 = r0;
L2 = r0;
L3 = r0;

/*Clear Out All the DAG Registers*/
B0 = r0;
B1 = r0;
B2 = r0;
B3 = r0;

I0 = r0;
I1 = r0;
I2 = r0;
I3 = r0;

M0 = r0;
M1 = r0;
M2 = r0;
M3 = r0;

/* Turn off the icache */
p0.l = (IMEM_CONTROL & 0xFFFF);
p0.h = (IMEM_CONTROL >> 16);
R1 = [p0];
R0 = ~ENICPLB;
R0 = R0 & R1;
/--------------------------
Disable ICPLB
---------------------------/

/* Anomaly 05000125 */
CLI R2;
SSYNC;
[p0] = R0;
SSYNC;
STI R2;

/* Turn off the dcache */
p0.l = (DMEM_CONTROL & 0xFFFF);
p0.h = (DMEM_CONTROL >> 16);
R1 = [p0];
R0 = ~ENDCPLB;
R0 = R0 & R1;
/----------------------------
Disable DCPLB
------------------------------/

/* Anomaly 05000125 */
CLI R2;
SSYNC;
[p0] = R0;
SSYNC;
STI R2;

/* Initialise General-Purpose I/O Modules on BF537 */
/* Rev 0.0 Anomaly 05000212 - PORTx_FER, PORT_MUX Registers Do Not
accept "writes" correctly: */
p0.h = hi(PORT_MUX);
p0.l = lo(PORT_MUX);
R0.L = W[P0]; //Read
SSYNC;
R0 = (PGDE_UART | PFTE_UART)(Z);
W[P0] = R0.L; //Write
SSYNC;
W[P0] = R0.L; /* Enable both UARTS */
SSYNC;

p0.h = hi(PORTF_FER);
p0.l = lo(PORTF_FER);
R0.L = W[P0]; //Read
SSYNC;
R0 = 0x000F(Z);
W[P0] = R0.L; //Write
SSYNC;
W[P0] = R0.L; /* Enable peripheral function of PORTF for UART0 and
UART1 */
SSYNC;

p0.h = hi(EMAC_SYSTAT);
p0.l = lo(EMAC_SYSTAT);
R0.h = 0xFFFF; /* Clear EMAC Interrupt Status bits */
R0.l = 0xFFFF;
[P0] = R0;
SSYNC;

/*Initialise UART*/
p0.h = hi(UART_LCR);
p0.l = lo(UART_LCR);
r0 = 0x0(Z);
w[p0] = r0.L; /* To enable DLL writes */
ssync;

p0.h = hi(UART_DLL);
p0.l = lo(UART_DLL);
r0 = 0x00(Z);
w[p0] = r0.L;
ssync;

p0.h = hi(UART_DLH);
p0.l = lo(UART_DLH);
r0 = 0x00(Z);
w[p0] = r0.L;
ssync;

p0.h = hi(UART_GCTL);
p0.l = lo(UART_GCTL);
r0 = 0x0(Z);
w[p0] = r0.L; /* To enable UART clock */
ssync;

/* Initialize stack pointer */
sp.l = lo(INITIAL_STACK);
sp.h = hi(INITIAL_STACK);
fp = sp;
usp = sp;

/*Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM*/
call _bf53x_relocate_l1_mem;
#if CONFIG_BFIN_KERNEL_CLOCK
call start_dma_code;

Thursday, August 17, 2006

GCC macro

Very good reference:

http://gcc.gnu.org/onlinedocs/gcc-3.1.1/cpp/Macros.html#Macros

#define __IRQ_SYSTMR(_X) IRQ_TMR ## _X
#define _IRQ_SYSTMR(_X) __IRQ_SYSTMR(_X)
#define IRQ_SYSTMR _IRQ_SYSTMR(CONFIG_IPIPE_SYS_TIMER)

#ifdef CONFIG_BF533
#define __IRQ_PRIOTMR(_X) CONFIG_TIMER ## _X
#else
#define __IRQ_PRIOTMR(_X) CONFIG_IRQ_TMR ## _X

create kernel patch

1. make clean
2. make mrproper
3. diff -urN -x Entries -x Entries.Log -x Repository -x Root -x .config
-x .version -x .config.* -x rootfs.img -x *.cmd -x *.o -x Tag -x linux
-x *.rej -x *.orig -x *.bak $1 $2
($1 and $2 are the original and the modified source trees.)

Tuesday, August 08, 2006

xenomai initialize

modules.c:

int __init __xeno_sys_init(void)

include/asm-blackfin/system.h:

static inline int xnarch_init (void)

{
int err;

/* Register the irq_tail_hook - do rescheduling */

__ipipe_irq_tail_hook = (unsigned long)&xnpod_schedule_deferred;

/* Register Xenomai domain */
err = rthal_init();

if (err)
return err;

#ifdef CONFIG_SMP
/* The HAL layer also sets the same CPU affinity so that both
modules keep their execution sequence on SMP boxen. */
set_cpus_allowed(current,cpumask_of_cpu(0));

Sunday, August 06, 2006

Re: rt_task_set_period

/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
/--------------------
irq: starts from 6

This function can be called from:

ipipe_grab_irq() - HW
ipipe_trigger_irq() - SW; a SW-triggered IRQ passes regs == NULL, so the
HW need not be acknowledged.
---------------------/

struct ipipe_domain *this_domain;
struct list_head *head, *pos;
int m_ack, s_ack, s = -1;
ipipe_declare_cpuid;

m_ack = (regs == NULL); /* Software-triggered IRQs do not need
* any ack. */

/--------------------

A software-triggered IRQ has no regs.
See:
ipipe-core.c: ipipe_trigger_irq(): __ipipe_handle_irq(irq, NULL);
---------------------/

ipipe_load_cpuid();

this_domain = ipipe_percpu_domain[cpuid];
/-------------------------

struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
{[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };

What does "ipipe_percpu_domain[cpuid]" stand for?

The current domain on "this" CPU (meaningful on SMP systems); cpuid is
always 0 on Blackfin.
--------------------------/

s_ack = m_ack;

if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
head = &this_domain->p_link;
else
head = __ipipe_pipeline.next;

/----------------------------
__ipipe_pipeline: the global list of domains, in priority order.
head starts either from this_domain (the current domain, in which the
interrupt arrived) or from the highest-priority domain.
-----------------------------/

/* Ack the interrupt. */

pos = head;

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/--------------------------------
Starting from "head"
---------------------------------/

/*
* For each domain handling the incoming IRQ, mark it as
* pending in its log.
*/
if (test_bit(IPIPE_HANDLE_FLAG,
&next_domain->irqs[irq].control)) {
/*
* Domains that handle this IRQ are polled for
* acknowledging it by decreasing priority order. The
* interrupt must be made pending _first_ in the
* domain's status flags before the PIC is unlocked.
*/

next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
__ipipe_set_irq_bit(next_domain, cpuid, irq);
/--------------------

struct ipcpudata {
unsigned long status;
unsigned long irq_pending_hi;
unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
struct ipirqcnt {
unsigned long pending_hits;
unsigned long total_hits;
} irq_counters[IPIPE_NR_IRQS];
} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];

#define __ipipe_set_irq_bit(ipd, cpuid, irq) \
do { \
	if (!test_bit(IPIPE_LOCK_FLAG, &(ipd)->irqs[irq].control)) { \
		__set_bit(irq & IPIPE_IRQ_IMASK, \
			&(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
		__set_bit(irq >> IPIPE_IRQ_ISHIFT, \
			&(ipd)->cpudata[cpuid].irq_pending_hi); \
	} \
} while (0)

(The "a" bit of irq_pending_hi is set)
irq = a * 32 + irq_pending_lo[a]

--------------------/

ipipe_mark_irq_receipt(next_domain, irq, cpuid);

/---------
meaningful when enable CONFIG_IPIPE_STATS
-------------/

/*
* Always get the first master acknowledge available.
* Once we've got it, allow slave acknowledge
* handlers to run (until one of them stops us).
*/
if (next_domain->irqs[irq].acknowledge != NULL) {
if (!m_ack)
m_ack = next_domain->irqs[irq].acknowledge(irq);
else if (test_bit
(IPIPE_SHARED_FLAG,
&next_domain->irqs[irq].control) && !s_ack)
s_ack = next_domain->irqs[irq].acknowledge(irq);
}
}

/-----------------------------------------------------------------------------------

Any usage for m_ack and s_ack?

Call the HW ack routine here, mainly to mask the "irq" while it is
being handled.

irqs[irq].acknowledge(irq) -> __ipipe_ack_irq() --> see below

ints-priority-sc.c
static struct irqchip bf533_core_irqchip = {
#ifdef CONFIG_IPIPE
.ack = bf533_core_mask_irq,
#else /* !CONFIG_IPIPE */
.ack = ack_noop,

Tuesday, August 01, 2006

kernel idle on BFIN

/*
* The idle loop on BFIN
*/
inline static void default_idle(void)
{
while (!need_resched()) {
leds_switch(LED_OFF);
ipipe_suspend_domain();
__asm__("nop;\n\t nop;\n\t nop;\n\t idle;\n\t": : :"cc");
leds_switch(LED_ON);
}
}

void (*idle)(void) = default_idle;

/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
idle();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
