Sunday, August 27, 2006

rt_task_set_period - resend


/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
/--------------------
irq: starts from 6

This function can be called from:

__ipipe_grab_irq()  - HW interrupt path
ipipe_trigger_irq() - SW-triggered IRQ; regs == NULL, so there is no
need to acknowledge the HW.
---------------------/

struct ipipe_domain *this_domain;
struct list_head *head, *pos;
int m_ack, s_ack, s = -1;
ipipe_declare_cpuid;

m_ack = (regs == NULL); /* Software-triggered IRQs do not need
* any ack. */

/--------------------

A software-triggered IRQ has no regs.
See:
ipipe-core.c: ipipe_trigger_irq(): __ipipe_handle_irq(irq, NULL);
---------------------/

ipipe_load_cpuid();

this_domain = ipipe_percpu_domain[cpuid];
/-------------------------

struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
{[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };

What does "ipipe_percpu_domain[cpuid]" stands for?

Current domain on "this" CPU (meaningful for SMP system). cpuid always =
0 on Blackfin.
--------------------------/

s_ack = m_ack;

if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
head = &this_domain->p_link;
else
head = __ipipe_pipeline.next;

/----------------------------
__ipipe_pipeline: the global list of domains, in priority order.
head starts either at this_domain (the current domain, in which the
interrupt happened, when the IRQ is "sticky") or at the highest-priority
domain.
-----------------------------/

/* Ack the interrupt. */

pos = head;

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/--------------------------------
Starting from "head"
---------------------------------/

/*
* For each domain handling the incoming IRQ, mark it as
* pending in its log.
*/
if (test_bit(IPIPE_HANDLE_FLAG,
&next_domain->irqs[irq].control)) {
/*
* Domains that handle this IRQ are polled for
* acknowledging it by decreasing priority order. The
* interrupt must be made pending _first_ in the
* domain's status flags before the PIC is unlocked.
*/

next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
__ipipe_set_irq_bit(next_domain, cpuid, irq);
/--------------------

struct ipcpudata {
unsigned long status;
unsigned long irq_pending_hi;
unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
struct ipirqcnt {
unsigned long pending_hits;
unsigned long total_hits;
} irq_counters[IPIPE_NR_IRQS];
} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];

#define __ipipe_set_irq_bit(ipd, cpuid, irq) \
do { \
	if (!test_bit(IPIPE_LOCK_FLAG, &(ipd)->irqs[irq].control)) { \
		__set_bit(irq & IPIPE_IRQ_IMASK, \
			  &(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
		__set_bit(irq >> IPIPE_IRQ_ISHIFT, \
			  &(ipd)->cpudata[cpuid].irq_pending_hi); \
	} \
} while(0)

(Two-level log: bit "a" of irq_pending_hi marks word "a" of
irq_pending_lo as non-empty; the pending irq itself is irq = a * 32 + b,
where b is the bit set in irq_pending_lo[a].)

--------------------/
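
/--------------------
A worked example of this two-level log (a sketch, assuming
IPIPE_IRQ_ISHIFT == 5, i.e. 32 IRQs per word): for irq = 38,
38 >> 5 = 1 and 38 & 31 = 6, so bit 6 of irq_pending_lo[1] and bit 1 of
irq_pending_hi get set. __ipipe_sync_stage() later recovers the irq
number the same way; roughly:

static inline int __next_pending_irq(struct ipcpudata *cpudata)
{
	int level, rank;

	if (cpudata->irq_pending_hi == 0)
		return -1;				/* nothing pending */

	level = ffs(cpudata->irq_pending_hi) - 1;	/* non-empty word */
	rank = ffs(cpudata->irq_pending_lo[level]) - 1;	/* bit within word */

	return (level << IPIPE_IRQ_ISHIFT) + rank;	/* irq number */
}

(__next_pending_irq() is only an illustration, not a function from the
kernel sources.)
--------------------/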

ipipe_mark_irq_receipt(next_domain, irq, cpuid);

/---------
only meaningful when CONFIG_IPIPE_STATS is enabled
-------------/

/*
* Always get the first master acknowledge available.
* Once we've got it, allow slave acknowledge
* handlers to run (until one of them stops us).
*/
if (next_domain->irqs[irq].acknowledge != NULL) {
if (!m_ack)
m_ack = next_domain->irqs[irq].acknowledge(irq);
else if (test_bit
(IPIPE_SHARED_FLAG,
&next_domain->irqs[irq].control) && !s_ack)
s_ack = next_domain->irqs[irq].acknowledge(irq);
}
}

/-----------------------------------------------------------------------------------

What are m_ack and s_ack used for?

The HW ack routine is called here, mainly to mask the "irq" while it is
being handled.

irqs[irq].acknowledge(irq) -> __ipipe_ack_irq() --> see below

ints-priority-sc.c
static struct irqchip bf533_core_irqchip = {
#ifdef CONFIG_IPIPE
.ack = bf533_core_mask_irq,
#else /* !CONFIG_IPIPE */
.ack = ack_noop,
#endif /* CONFIG_IPIPE */
.mask = bf533_core_mask_irq,
.unmask = bf533_core_unmask_irq,
};

static void bf533_internal_mask_irq(unsigned int irq)
{
*pSIC_IMASK &= ~(1 << (irq - (IRQ_CORETMR + 1)));
__builtin_bfin_ssync();
}

--------------------------------------------------------------------/

/*
* If the domain does not want the IRQ to be passed down the
* interrupt pipe, exit the loop now.
*/

if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
break;

pos = next_domain->p_link.next;
}

/*
* Now walk the pipeline, yielding control to the highest
* priority domain that has pending interrupt(s) or
* immediately to the current domain if the interrupt has been
* marked as 'sticky'. This search does not go beyond the
* current domain in the pipeline. We also enforce the
* additional root stage lock (blackfin-specific). */

if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
s = __test_and_set_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);

/----------------------------------------
test_and_set_bit() returns the old value of the bit.
If IPIPE_ROOTLOCK_FLAG is set, stall the root domain.
See __ipipe_sync_stage() below.
-----------------------------------------/

__ipipe_walk_pipeline(head, cpuid);

if (!s)
__clear_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status);
}
/---------------------------
Restore the root domain's original stall state (s == 0 means the bit was
clear before we set it above, so clear it again).

----------------------------/

==============================================================================
kernel/ipipe-root.c

int __ipipe_ack_irq(unsigned irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
ipipe_declare_cpuid;

if (irq == IRQ_SYSTMR) {
/* Clear interrupt latch for TIMER0, don't mask. */
*pTIMER_STATUS = 1;
__builtin_bfin_ssync();
return 1;
}

/*
* No need to mask IRQs at hw level: we are always called from
* __ipipe_handle_irq(), so interrupts are already off. We
* stall the pipeline so that spin_lock_irq*() ops won't
* unintentionally flush it, since this could cause infinite
* recursion.
*/

ipipe_load_cpuid();
flags = ipipe_test_and_stall_pipeline();
preempt_disable();
desc->chip->ack(irq);
preempt_enable_no_resched();
ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags,
cpuid);

return 1;
}

================================================================================

ipipe/core.c:

/* __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
be called with local hw interrupts disabled. */

void fastcall __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
{
struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];

while (pos != &__ipipe_pipeline) {
struct ipipe_domain *next_domain =
list_entry(pos, struct ipipe_domain, p_link);

/-----------------------------------------------
Note: this_domain and next_domain may differ. According to
__ipipe_handle_irq(), the walk starts either at "head" or at the
current domain.
-----------------------------------------------/

if (test_bit
(IPIPE_STALL_FLAG, &next_domain->cpudata[cpuid].status))
break; /* Stalled stage -- do not go further. */

/-------------------------------------
If a stage is stalled the walk stops here, so domains with lower
priority than next_domain are all ignored.
-------------------------------------/

if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {

if (next_domain == this_domain)
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
else {
__ipipe_switch_to(this_domain, next_domain,
cpuid);

ipipe_load_cpuid(); /* Processor might have changed. */

if (this_domain->cpudata[cpuid].
irq_pending_hi != 0
&& !test_bit(IPIPE_STALL_FLAG,
&this_domain->cpudata[cpuid].status))
__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
}

break;
} else if (next_domain == this_domain)
break;

pos = next_domain->p_link.next;
}
}

======================================================================
include/linux/ipipe.h

/* Called with hw interrupts off. */
static inline void __ipipe_switch_to(struct ipipe_domain *out,
struct ipipe_domain *in, int cpuid)
{
void ipipe_suspend_domain(void);

/*
* "in" is guaranteed to be closer than "out" from the head of the
* pipeline (and obviously different).
*/

ipipe_percpu_domain[cpuid] = in;

/------------------------------------------------
Current domain has changed.
-------------------------------------------------/

ipipe_suspend_domain(); /* Sync stage and propagate interrupts.
*/
/-------------------------------------------------
On return from ipipe_suspend_domain(), the current domain is normally
still "in" (unless a handler changed it underneath).
--------------------------------------------------/

ipipe_load_cpuid(); /* Processor might have changed. */

if (ipipe_percpu_domain[cpuid] == in)
/*
* Otherwise, something has changed the current domain under
* our feet recycling the register set; do not override.
*/
ipipe_percpu_domain[cpuid] = out;

/--------------------------------------------
On return from __ipipe_switch_to(), the current domain has been set
back to "out" (unless it changed underneath).
---------------------------------------------/

}

==========================================================================
kernel/ipipe/core.c

/*
* ipipe_suspend_domain() -- Suspend the current domain, switching to
* the next one which has pending work down the pipeline.
*/
void ipipe_suspend_domain(void)
{
struct ipipe_domain *this_domain, *next_domain;
struct list_head *ln;
unsigned long flags;
ipipe_declare_cpuid;

ipipe_lock_cpu(flags);

this_domain = next_domain = ipipe_percpu_domain[cpuid];

__clear_bit(IPIPE_STALL_FLAG, &this_domain->cpudata[cpuid].status);

ipipe_mark_domain_unstall(this_domain, cpuid);

if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
goto sync_stage;

for (;;) {
ln = next_domain->p_link.next;

if (ln == &__ipipe_pipeline)
break;

next_domain = list_entry(ln, struct ipipe_domain, p_link);

if (test_bit(IPIPE_STALL_FLAG,
&next_domain->cpudata[cpuid].status))
break;

if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
continue;

ipipe_percpu_domain[cpuid] = next_domain;
/-----------------------------------------------------------------------
Change current domain
-------------------------------------------------------------------------/

sync_stage:

__ipipe_sync_stage(IPIPE_IRQMASK_ANY);

ipipe_load_cpuid(); /* Processor might have changed. */

if (ipipe_percpu_domain[cpuid] != next_domain)
/*
* Something has changed the current domain under our
* feet, recycling the register set; take note.
*/
this_domain = ipipe_percpu_domain[cpuid];
}

ipipe_percpu_domain[cpuid] = this_domain;

/----------------------------------------------------
Restore current domain
------------------------------------------------------/

ipipe_unlock_cpu(flags);
}

===========================================================================

kernel/ipipe-core.c

/*
* __ipipe_sync_stage() -- Flush the pending IRQs for the current
* domain (and processor). This routine flushes the interrupt log
* (see "Optimistic interrupt protection" from D. Stodolsky et al. for
* more on the deferred interrupt scheme). Every interrupt that
* occurred while the pipeline was stalled gets played. WARNING:
* callers on SMP boxen should always check for CPU migration on
* return of this routine. One can control the kind of interrupts
* which are going to be sync'ed using the syncmask
* parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
* plays virtual interrupts only. This routine must be called with hw
* interrupts off.
*/
void __ipipe_sync_stage(unsigned long syncmask)
{
unsigned long mask, submask;
struct ipcpudata *cpudata;
struct ipipe_domain *ipd;
ipipe_declare_cpuid;
int level, rank;
unsigned irq;

ipipe_load_cpuid();
ipd = ipipe_percpu_domain[cpuid];
cpudata = &ipd->cpudata[cpuid];

if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
return;
/------------
Is "sync_stage" already been called? If so, return.
-------------/

/*
* The policy here is to keep the dispatching code interrupt-free
* by stalling the current stage. If the upper domain handler
* (which we call) wants to re-enable interrupts while in a safe
* portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
* sigaction()), it will have to unstall (then stall again before
* returning to us!) the stage when it sees fit.
*/
while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
level = ffs(mask) - 1;
__clear_bit(level, &cpudata->irq_pending_hi);

while ((submask = cpudata->irq_pending_lo[level]) != 0) {

if (ipd == ipipe_root_domain &&
test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags)) {
__set_bit(level, &cpudata->irq_pending_hi);
goto done;

/---------------------------------------------------------
If ROOTLOCK_FLAG is set, do not run the root domain's interrupt
handlers here; re-mark the level as pending and bail out.
----------------------------------------------------------/

}

rank = ffs(submask) - 1;
irq = (level << IPIPE_IRQ_ISHIFT) + rank;

/------------------------------------
Get the "irq" num
-------------------------------------/

if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
continue;

/------------------------------------------
If LOCK_FLAG is set, do not handle this "irq"
-------------------------------------------/
}

if (--cpudata->irq_counters[irq].pending_hits == 0) {
/---------------------------------------------
Every pending "irq" is going to be handled handled
----------------------------------------------/
__clear_bit(rank,
&cpudata->irq_pending_lo[level]);
ipipe_mark_irq_delivery(ipd,irq,cpuid);
}

__set_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_stall(ipd, cpuid);

if (ipd == ipipe_root_domain) {
/*
* Note: the I-pipe implements a
* threaded interrupt model on this
* arch for Linux external IRQs. The
* interrupt handler we call here only
* wakes up the associated IRQ thread.
*/
if (ipipe_virtual_irq_p(irq)) {

/----------------
Is this a virtual irq?

-----------------/
/* No irqtail here; virtual interrupts have
no effect on IPEND so there is no need for
processing deferral. */
local_irq_enable_hw();
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
local_irq_disable_hw();
} else
/* No need to run the irqtail here either; we are not
preemptable by hw IRQs, so non-Linux IRQs cannot
stack over the short thread wakeup code. Which in turn
means that no irqtail condition could be pending
for domains above Linux in the pipeline. */
((void (*)(unsigned, struct pt_regs *))
ipd->irqs[irq].handler) (irq, __ipipe_tick_regs + cpuid);

/-----------------------------------------------------
Run HW interrupt handler
--------------------------------------------------------/

} else {
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
/* Attempt to exit the outer interrupt
* level before starting the deferred
* IRQ processing. */
__ipipe_run_irqtail();
/-----------------
see below
-----------------/

__set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}
#ifdef CONFIG_SMP
{
int _cpuid = ipipe_processor_id();

if (_cpuid != cpuid) { /* Handle CPU migration. */
/* We expect any domain to clear the SYNC bit each
time it switches in a new task, so that preemptions
and/or CPU migrations (in the SMP case) over the
ISR do not lock out the log syncer for some
indefinite amount of time. In the Linux case,
schedule() handles this (see kernel/sched.c). For
this reason, we don't bother clearing it here for
the source CPU in the migration handling case,
since it must have scheduled another task in by
now. */
cpuid = _cpuid;
cpudata = &ipd->cpudata[cpuid];
__set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
}
}
#endif /* CONFIG_SMP */
__clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
ipipe_mark_domain_unstall(ipd, cpuid);
}
}

done:
__clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}

==============================================================
kernel/ipipe-core.c

static inline void __ipipe_run_irqtail(void)
{
asmlinkage void __ipipe_call_irqtail(void);
unsigned long pending;

__builtin_bfin_csync();

pending = *pIPEND;
if (pending & 0x8000) {

/----------------------------
IPEND[15] is set: IVG15 is active. The kernel runs in supervisor mode
at IVG15, so this interrupt came in over kernel (IVG15) context.
-----------------------------/

pending &= ~0x8010;

/------------------------------
mask out bit 15 (IVG15) and bit 4 (the global interrupt-disable bit)
---------------------------/

if (pending && (pending & (pending - 1)) == 0)
__ipipe_call_irqtail();
/--------------
See below.

pending & (pending - 1) clears the lowest set bit, so the call is made
only when exactly one other event is active, i.e. no nested interrupts.

---------------/
}
}
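
/----------------------------
Worked example of the single-bit test above (x & (x - 1) clears the
lowest set bit, so the expression is 0 iff at most one bit is set):

IPEND = 0x8020 -> pending = 0x8020 & ~0x8010 = 0x0020
                  0x0020 & 0x001f == 0 -> one event active, run the irqtail
IPEND = 0x80a0 -> pending = 0x00a0
                  0x00a0 & 0x009f == 0x0080 != 0 -> nested, skip the irqtail
-----------------------------/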

===============================================
ENTRY(___ipipe_call_irqtail)

r0.l = 1f;
r0.h = 1f;
reti = r0;
rti
/-------

return from interrupt and continue here
---------/
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
p0.l = ___ipipe_irq_tail_hook;
p0.h = ___ipipe_irq_tail_hook;
p0 = [p0] ;
sp += -12 ;
call (p0) ;
sp += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];

/------------
Call ipipe_irq_tail_hook
asm-blackfin/system.h:

static inline int xnarch_init (void)

{
int err;

__ipipe_irq_tail_hook = (unsigned long)&xnpod_schedule_deferred;

...
}

------------/

[--sp] = reti;
reti = [sp++]; /* IRQs are off. */

/-----------
Store RETI and disable interrupt nesting.

Instructions that access the RETI register have an implicit side
effect: reading the RETI register enables interrupt nesting; writing to
it disables nesting again.

------------/

r0.h = 3f;
r0.l = 3f;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync;
r0 = 0x401f;
sti r0;
/----------------------------------
enable interrupts, but only IVG14 (0x401f): the "raise 14" below then
re-enters through EVT14 (label 3) at IVG14 priority.
----------------------------------/

raise 14;

[--sp] = reti; /* IRQs on. */
2:
jump 2b /* Likely paranoid. */
3:
sp += 4; /* Discard saved RETI */
r0.h = _evt14_softirq;
r0.l = _evt14_softirq;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync

/* Restore EVT14 */
p0.l = _irq_flags;
p0.h = _irq_flags;
r0 = [p0];
sti r0
rts;

==========================================================
#ifdef __KERNEL__

void xnpod_schedule_deferred (void)

{
if (nkpod && xnsched_resched_p())
xnpod_schedule();
}

#endif /* __KERNEL__ */

===========================================================

IPIPE - 1 - resend


1. Grab interrupts: interrupt.S

In __common_int_entry:

#ifdef CONFIG_IPIPE
call ___ipipe_grab_irq
SP += 12;
cc = r0 == 0;
if cc jump common_restore_context;
#else /* CONFIG_IPIPE */
call _do_irq;
SP += 12;
#endif /* CONFIG_IPIPE */
call return_from_int;
common_restore_context:
RESTORE_CONTEXT
rti;

/--------------------------------
Note: if __ipipe_grab_irq() returns 0, skip return_from_int and just
restore the context; only a return of 1 (root domain, not stalled) goes
through return_from_int.
---------------------------------/

2. asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
unsigned long sic_status;
ipipe_declare_cpuid;
int irq;

/--------------------------------------------------------
"struct ivgx {
/* irq number for request_irq, available in mach-bf533/irq.h */
int irqno;
/* corresponding bit in the SIC_ISR register */
int isrflag;
} ivg_table[NR_PERI_INTS];

struct ivg_slice {
/* position of first irq in ivg_table for given ivg */
struct ivgx *ifirst;
struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];"

ivg7_13 is a table that stores the mapping from peripheral interrupts
to the core interrupts IVG7-13.

ivg_table contains the IRQ number and the SIC_ISR bit position of each
peripheral interrupt.
------------------------------------/

if (likely(vec == EVT_IVTMR_P)) {
irq = IRQ_CORETMR;
goto handle_irq;
}

/-----------------------------------------
The core timer interrupt is the most common case, hence the likely() hint.
-----------------------------------------/

__builtin_bfin_ssync();
sic_status = *pSIC_IMASK & *pSIC_ISR;

/----------------------------------------

sic_status: peripheral (system) interrupts that are asserted and not masked.

------------------------------------------/

for(;; ivg++) {
if (ivg >= ivg_stop) {
num_spurious++;
return 0;
}
else if (sic_status & ivg->isrflag)
break;

/---------------------------------------
Find the peripheral interrupt that triggered "vec".
----------------------------------------/
}

irq = ivg->irqno;
/---------------
irq numbering:

See irq.h - peripheral interrupt numbers start from 7, in the same
order as the SIC_ISR bits.
----------------/
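
/---------------
For example (assuming IRQ_CORETMR == 6): SIC_ISR bit 0 maps to irq 7,
bit 1 to irq 8, and so on - which is why bf533_internal_mask_irq()
(quoted in the first post above) computes 1 << (irq - (IRQ_CORETMR + 1)).
----------------/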

ipipe_load_cpuid();

if (irq == IRQ_SYSTMR) {
*pTIMER_STATUS = 1; /* Latch TIMIL0 */
/* for update_process_times() */
__ipipe_tick_regs[cpuid].ipend = regs->ipend;
}
/--------------
IRQ_SYSTMR is a peripheral interrupt, while the core timer is a core
interrupt. regs->ipend is saved because the Linux timer handler is
later invoked with __ipipe_tick_regs instead of the real pt_regs (see
__ipipe_sync_stage()), and update_process_times() needs ipend to tell
whether user or kernel code was interrupted.
-----------------/

handle_irq:

__ipipe_handle_irq(irq, regs);

return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
!test_bit(IPIPE_STALL_FLAG,
&ipipe_root_domain->cpudata[cpuid].status));

/--------------
If the current domain is the root (Linux) domain and it is not stalled,
return 1: the caller then runs return_from_int, i.e. the normal Linux
interrupt exit path.
---------------/

}

System call in Blackfin - Cont' - resend


================================================
arch/blackfin/mach-common/ints-priority-sc.c:

int init_arch_irq()
{

#ifndef CONFIG_KGDB
*pEVT0 = evt_emulation;
#endif
*pEVT2 = evt_evt2;
*pEVT3 = trap;
*pEVT5 = evt_ivhw;
*pEVT6 = evt_timer;
*pEVT7 = evt_evt7;
*pEVT8 = evt_evt8;
*pEVT9 = evt_evt9;
*pEVT10 = evt_evt10;
*pEVT11 = evt_evt11;
*pEVT12 = evt_evt12;
*pEVT13 = evt_evt13;
*pEVT14 = evt14_softirq;
*pEVT15 = evt_system_call;
__builtin_bfin_csync();
}
=======================================================
arch/blackfin/mach-common/entry.S:

ENTRY(_trap) /* Exception: 4th entry into system event table (supervisor mode) */
/* Since the kernel stack can be anywhere, it's not guaranteed to be
* covered by a CPLB. Switch to an exception stack; use RETN as a
* scratch register (for want of a better option).
*/
/----------------------------------
#define ENTRY(name) .globl name; ALIGN; name:
-----------------------------------/

retn = sp;
/-------------------------------
RETN is used just as a temporary (scratch) register here.
--------------------------------/

sp.l = exception_stack_top;
sp.h = exception_stack_top;
/------------------------------------
In entry.S
/* Put this in the kernel data section - that should always be covered
* by a CPLB.
*/
exception_stack:
.rept 1024
.long 0;
.endr
exception_stack_top:
----------------------------------------/

/* Try to deal with syscalls quickly. */
[--sp] = ASTAT;
[--sp] = (R7:6, P5:4);
r7 = SEQSTAT; /* reason code is in bit 5:0 */
r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6;
p5.h = extable;
p5.l = extable;
p4 = r7;
p5 = p5 + (p4 << 2);
p4 = [p5];
jump (p4);
/---------------------------------------------
What is the relationship between the extable and the exception handler
entries? When does extable get initialized?
-----------------------------------------------/

badsys:
r7 = -ENOSYS; /* signextending enough */
[sp + PT_R0] = r7; /* return value from system call */
jump syscall_really_exit;

ENTRY(ex_syscall)
(R7:6,P5:4) = [sp++];
ASTAT = [sp++];
raise 15; /* invoked by TRAP #0, for sys call */
sp = retn;
rtx

==============================================
mach-comm/interrupt.S

/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
call system_call;
jump common_restore_context;
/------------------------------------
Note: it does not go through __common_int_entry.
-------------------------------------/

==============================================
entry.S:

ENTRY(system_call)
/* Store IPEND */
p2.l = lo(IPEND);
p2.h = hi(IPEND);
csync;
r0 = [p2];
[sp + PT_IPEND] = r0;

/* Store RETS for now */
r0 = rets;
/--------------------------------
RETS points back into _evt_system_call, i.e. to the instruction after
the call:
	call system_call;
	jump common_restore_context;
--------------------------------/

[sp + PT_RESERVED] = r0;
/* Set the stack for the current process */
r7 = sp;
r6.l = lo(ALIGN_PAGE_MASK);
r6.h = hi(ALIGN_PAGE_MASK);
r7 = r7 & r6; /*thread_info*/
p2 = r7;
p2 = [p2];

[p2+(TASK_THREAD+THREAD_KSP)] = sp;
#ifdef CONFIG_IPIPE
r0 = sp;
SP += -12;
call ___ipipe_syscall_root;
SP += 12;
cc = r0 == 1;
/--------------------------------
Should not pass to Linux, no tail work
------------------------------------/
if cc jump syscall_really_exit;
cc = r0 == -1;
/-------------------------------------
Should not pass to Linux, tail work (handling signal)
-------------------------------------/
if cc jump resume_userspace;
/-----------------------------------------
Should pass to Linux
------------------------------------------/
r3 = [sp + PT_R3];
r4 = [sp + PT_R4];
p0 = [sp + PT_ORIG_P0];
#endif /* CONFIG_IPIPE */

/* Check the System Call */
r7 = __NR_syscall;
/*System call number is passed in P0 */
r5 = p0;
cc = r5 < r7;
if ! cc jump badsys;

/------------------------------------
Check whether the sys call is valid or not.
--------------------------------------/

/* Execute the appropriate system call */

p4 = r5;
p5.l = sys_call_table;
p5.h = sys_call_table;
p5 = p5 + (p4 << 2);
r0 = [sp + PT_R0];
r1 = [sp + PT_R1];
r2 = [sp + PT_R2];
p5 = [p5];

/* are we tracing syscalls?*/
r7 = sp;
r6.l = lo(ALIGN_PAGE_MASK);
r6.h = hi(ALIGN_PAGE_MASK);
r7 = r7 & r6;
/----------------------------
Get thread_info:
see asm-blackfin/thread_info.h:
/* Given a task stack pointer, you can find its task structure
 * just by masking it to the 8K boundary.
 */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
__asm__("%0 = sp;": "=&d"(ti):
);
return (struct thread_info *)((long)ti & ~8191UL);
}
-----------------------------/
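
/----------------------------
Worked example (the kernel stack is 8K, so the mask is ~8191 ==
0xffffe000): if sp == 0x00562f84, then sp & ~8191 == 0x00562000, the
base of the stack, where struct thread_info lives.
-----------------------------/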
p2 = r7;
r7 = [p2+TI_FLAGS];
CC = BITTST(r7,TIF_SYSCALL_TRACE);
if CC JUMP sys_trace;

[--sp] = r4;
[--sp] = r3;
SP += -12;
call (p5);
SP += 20;
[sp + PT_R0] = r0;
/-----------------------------
Call the actual syscall handler; R0 holds its return value, which is
written back to PT_R0 in the saved registers.
-----------------------------/
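
/-----------------------------
A rough C equivalent of the dispatch above (a sketch, not code from the
kernel; "nr" is the syscall number passed in P0, the rN values come
from the saved pt_regs slots):

	if (nr >= __NR_syscall)
		regs->r0 = -ENOSYS;	/* badsys */
	else
		regs->r0 = sys_call_table[nr](r0, r1, r2, r3, r4);

PT_R0 is restored into R0 when the context is popped, so this is what
user space sees as the syscall return value.
-----------------------------/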

resume_userspace:
r7 = sp;
r4.l = lo(ALIGN_PAGE_MASK);
r4.h = hi(ALIGN_PAGE_MASK);
r7 = r7 & r4; /*thread_info->flags*/
p5 = r7;
resume_userspace_1:
/* Disable interrupts. */
[--sp] = reti;
reti = [sp++];

/----------------------------------------------
Disable interrupt nesting (we are running at IVG15).

Instructions that access the RETI register have an implicit side
effect: reading the RETI register enables interrupt nesting; writing to
it disables nesting again.
------------------------------------------------/

r7 = [p5 + TI_FLAGS];
r4.l = lo(_TIF_WORK_MASK);
r4.h = hi(_TIF_WORK_MASK);
r7 = r7 & r4;

syscall_resched:
cc = BITTST(r7, TIF_NEED_RESCHED);

/-----------------------
This branch is the only exit from the syscall_resched loop.
------------------------/
if !cc jump syscall_sigpending;

/--------------------------------------
OK. Need reschedule
-----------------------------------------/
/* Reenable interrupts. */
[--sp] = reti;
r0 = [sp++];

SP += -12;
call _schedule;
SP += 12;

jump resume_userspace_1;
/-----------------------------------
Loop back and check again, until no reschedule is needed.
------------------------------------/

syscall_sigpending:
cc = BITTST(r7, TIF_RESTORE_SIGMASK);
if cc jump syscall_do_signals;
cc = BITTST(r7, TIF_SIGPENDING);
if !cc jump syscall_really_exit;
syscall_do_signals:
/* Reenable interrupts. */
[--sp] = reti;
r0 = [sp++];

r0 = sp;
SP += -12;
call _do_signal;
SP += 12;

syscall_really_exit:
#ifdef CONFIG_IPIPE
[--sp] = reti;
r5 = [sp++];
#==endif /* CONFIG_IPIPE */
r5 = [sp + PT_RESERVED];
rets = r5;
rts;
/-----------------------------
Return to common_restore_context (the "jump" after the call in
_evt_system_call); RESTORE_CONTEXT + "rti" then exits IVG15. (The "rtx"
in ex_syscall already exited the exception, before IVG15 was taken.)

"The difference between a JUMP and a CALL is that a CALL automatically
loads the return address into the RETS register. The return address is
the next sequential address after the CALL instruction."

-------------------------------/

sys_trace:
[--sp] = r3;
[--sp] = r2;
[--sp] = r1;
[--sp] = r0;
[--sp] = p5;
[--sp] = p2;
[--sp] = p1;
[--sp] = p0;
r1 = 0;
call _syscall_trace;
p0 = [sp++];
p1 = [sp++];
p2 = [sp++];
p5 = [sp++];
r0 = [sp++];
r1 = [sp++];
r2 = [sp++];
r3 = [sp++];

[--sp] = r4;
[--sp] = r3;
SP += -12;
call (p5);
SP += 20;
[sp + PT_R0] = r0;

[--sp] = r3;
[--sp] = r2;
[--sp] = r1;
[--sp] = r0;
[--sp] = p5;
[--sp] = p2;
[--sp] = p1;
[--sp] = p0;
r1 = 1;
call _syscall_trace;
p0 = [sp++];
p1 = [sp++];
p2 = [sp++];
p5 = [sp++];
r0 = [sp++];
r1 = [sp++];
r2 = [sp++];
r3 = [sp++];

jump resume_userspace;

ipipe-root.c
====================================================
asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
/---------------------------------------
regs == sp, i.e. the pt_regs saved on the kernel stack (r0 = sp at the
call site in entry.S).
----------------------------------------/

ipipe_declare_cpuid;
unsigned long flags;

/*
* This routine either returns:
* 0 -- if the syscall is to be passed to Linux;
* 1 -- if the syscall should not be passed to Linux, and no
* tail work should be performed;
* -1 -- if the syscall should not be passed to Linux but the
* tail work has to be performed (for handling signals etc).
*/

/-----------------------------------------------------
#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT)
#define IPIPE_EVENT_SCHEDULE (IPIPE_FIRST_EVENT + 1)
#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 2)
#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 3)
#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 4)
-------------------------------------------------------/

if (__ipipe_event_pipelined_p(IPIPE_EVENT_SYSCALL) &&
__ipipe_dispatch_event(IPIPE_EVENT_SYSCALL,regs) > 0) {

/------------------------------------------
Do not propagate the event to Linux
-------------------------------------------/
/*
* We might enter here over a non-root domain and exit
* over the root one as a result of the syscall
* (i.e. by recycling the register set of the current
* context across the migration), so we need to fixup
* the interrupt flag upon return too, so that
* __ipipe_unstall_iret_root() resets the correct
* stall bit on exit.
*/
if (ipipe_current_domain == ipipe_root_domain && !in_atomic()) {

/-------------------------------------------
The event handler may have migrated us back to the root domain; if so,
and we are not in atomic context, sync pending virtual IRQs and return
-1 so the caller takes the Linux tail-work path (resume_userspace).
--------------------------------------------/

/*
* Sync pending VIRQs before _TIF_NEED_RESCHED
* is tested.
*/
ipipe_lock_cpu(flags);
if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi &
     IPIPE_IRQMASK_VIRT) != 0)
__ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
ipipe_unlock_cpu(flags);
return -1;
}
return 1;
}

return 0;
}

kernel/ipipe/core.c
==================================================================
/* __ipipe_dispatch_event() -- Low-level event dispatcher. */

int fastcall __ipipe_dispatch_event (unsigned event, void *data)
{
struct ipipe_domain *start_domain, *this_domain, *next_domain;
struct list_head *pos, *npos;
unsigned long flags;
ipipe_declare_cpuid;
int propagate = 1;

ipipe_lock_cpu(flags);

start_domain = this_domain = ipipe_percpu_domain[cpuid];

list_for_each_safe(pos,npos,&__ipipe_pipeline) {
/-----------------------------------------
Starting from the highest priority domain
------------------------------------------/

next_domain = list_entry(pos, struct ipipe_domain, p_link);

/*
* Note: Domain migration may occur while running
* event or interrupt handlers, in which case the
* current register set is going to be recycled for a
* different domain than the initiating one. We do
* care for that, always tracking the current domain
* descriptor upon return from those handlers.
*/
if (next_domain->evhand[event] != NULL) {
/--------------------------------------------
#define rthal_catch_taskexit(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_EXIT,hdlr)
#define rthal_catch_sigwake(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_SIGWAKE,hdlr)
#define rthal_catch_schedule(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_SCHEDULE,hdlr)
#define rthal_catch_setsched(hdlr) ipipe_catch_event(&rthal_domain,IPIPE_EVENT_SETSCHED,hdlr)
#define rthal_catch_losyscall(hdlr) ipipe_catch_event(ipipe_root_domain,IPIPE_EVENT_SYSCALL,hdlr)
#define rthal_catch_hisyscall(hdlr) ipipe_catch_event(&rthal_domain,IPIPE_EVENT_SYSCALL,hdlr)
#define rthal_catch_exception(ex,hdlr) ipipe_catch_event(&rthal_domain,ex|IPIPE_EVENT_SELF,hdlr)

---------------------------------------------/

ipipe_percpu_domain[cpuid] = next_domain;
ipipe_unlock_cpu(flags);
propagate = !next_domain->evhand[event](event, start_domain, data);
ipipe_lock_cpu(flags);
if (ipipe_percpu_domain[cpuid] != next_domain)
	this_domain = ipipe_percpu_domain[cpuid];
}

if (next_domain != ipipe_root_domain && /* NEVER sync the root stage here. */
    next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
    !test_bit(IPIPE_STALL_FLAG, &next_domain->cpudata[cpuid].status)) {
	ipipe_percpu_domain[cpuid] = next_domain;
	__ipipe_sync_stage(IPIPE_IRQMASK_ANY);
	ipipe_load_cpuid();
	if (ipipe_percpu_domain[cpuid] != next_domain)
		this_domain = ipipe_percpu_domain[cpuid];
}

ipipe_percpu_domain[cpuid] = this_domain;

if (next_domain == this_domain || !propagate)
break;
}

ipipe_unlock_cpu(flags);

return !propagate;
}
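
/------------------------------------------------------------------
A minimal sketch of how a domain hooks such an event, following the
rthal_catch_*() macros quoted above (the handler name and body here are
only illustrative). __ipipe_dispatch_event() calls
evhand[event](event, start_domain, data), and "propagate" becomes false
as soon as a handler returns non-zero:

static int my_syscall_event(unsigned event, struct ipipe_domain *from,
			    void *data)
{
	struct pt_regs *regs = data;	/* for IPIPE_EVENT_SYSCALL */

	/* ... inspect regs, possibly handle the syscall in this domain ... */

	return 0;	/* 0: propagate to the next domain (eventually Linux);
			 * non-zero: swallow the event here. */
}

ipipe_catch_event(ipipe_root_domain, IPIPE_EVENT_SYSCALL, &my_syscall_event);
-------------------------------------------------------------------/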

kernel start up - blackfin arch - resend


mach-bf537/head.S:

.text

ENTRY(__start)
ENTRY(__stext)
/* R0: argument of command line string, passed from uboot, save it */
R7 = R0;
/* Set the SYSCFG register */
R0 = 0x36;
SYSCFG = R0; /* Enable Cycle Counter and Nesting Of Interrupts (3rd bit) */
/-------------------------------------------------------
SYSCFG: Only three settings

SYSCFG[2]: SNEN - self-nesting interrupt enable
SYSCFG[1]: CCEN - cycle count
SYSCFG[0]: SSSTEP - Supervisor Single Step

--------------------------------------------------------/

R0 = 0;

/*Clear Out All the data and pointer Registers*/
R1 = R0;
R2 = R0;
R3 = R0;
R4 = R0;
R5 = R0;
R6 = R0;

P0 = R0;
P1 = R0;
P2 = R0;
P3 = R0;
P4 = R0;
P5 = R0;

LC0 = r0;
LC1 = r0;
L0 = r0;
L1 = r0;
L2 = r0;
L3 = r0;

/*Clear Out All the DAG Registers*/
B0 = r0;
B1 = r0;
B2 = r0;
B3 = r0;

I0 = r0;
I1 = r0;
I2 = r0;
I3 = r0;

M0 = r0;
M1 = r0;
M2 = r0;
M3 = r0;

/* Turn off the icache */
p0.l = (IMEM_CONTROL & 0xFFFF);
p0.h = (IMEM_CONTROL >> 16);
R1 = [p0];
R0 = ~ENICPLB;
R0 = R0 & R1;
/--------------------------
Disable ICPLB
---------------------------/

/* Anomaly 05000125 */
CLI R2;
SSYNC;
[p0] = R0;
SSYNC;
STI R2;

/* Turn off the dcache */
p0.l = (DMEM_CONTROL & 0xFFFF);
p0.h = (DMEM_CONTROL >> 16);
R1 = [p0];
R0 = ~ENDCPLB;
R0 = R0 & R1;
/----------------------------
Disable DCPLB
------------------------------/

/* Anomaly 05000125 */
CLI R2;
SSYNC;
[p0] = R0;
SSYNC;
STI R2;

/* Initialise General-Purpose I/O Modules on BF537 */
/* Rev 0.0 Anomaly 05000212 - PORTx_FER, PORT_MUX Registers Do Not
accept "writes" correctly: */
p0.h = hi(PORT_MUX);
p0.l = lo(PORT_MUX);
R0.L = W[P0]; //Read
SSYNC;
R0 = (PGDE_UART | PFTE_UART)(Z);
W[P0] = R0.L; //Write
SSYNC;
W[P0] = R0.L; /* Enable both UARTS */
SSYNC;

p0.h = hi(PORTF_FER);
p0.l = lo(PORTF_FER);
R0.L = W[P0]; //Read
SSYNC;
R0 = 0x000F(Z);
W[P0] = R0.L; //Write
SSYNC;
W[P0] = R0.L; /* Enable peripheral function of PORTF for UART0 and
UART1 */
SSYNC;

p0.h = hi(EMAC_SYSTAT);
p0.l = lo(EMAC_SYSTAT);
R0.h = 0xFFFF; /* Clear EMAC Interrupt Status bits */
R0.l = 0xFFFF;
[P0] = R0;
SSYNC;

/*Initialise UART*/
p0.h = hi(UART_LCR);
p0.l = lo(UART_LCR);
r0 = 0x0(Z);
w[p0] = r0.L; /* To enable DLL writes */
ssync;

p0.h = hi(UART_DLL);
p0.l = lo(UART_DLL);
r0 = 0x00(Z);
w[p0] = r0.L;
ssync;

p0.h = hi(UART_DLH);
p0.l = lo(UART_DLH);
r0 = 0x00(Z);
w[p0] = r0.L;
ssync;

p0.h = hi(UART_GCTL);
p0.l = lo(UART_GCTL);
r0 = 0x0(Z);
w[p0] = r0.L; /* To enable UART clock */
ssync;

/* Initialize stack pointer */
sp.l = lo(INITIAL_STACK);
sp.h = hi(INITIAL_STACK);
fp = sp;
usp = sp;

/*Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM*/
call _bf53x_relocate_l1_mem;
#if CONFIG_BFIN_KERNEL_CLOCK
call start_dma_code;
#endif
/* Code for initializing Async memory banks */

p2.h = hi(EBIU_AMBCTL1);
p2.l = lo(EBIU_AMBCTL1);
r0.h = hi(AMBCTL1VAL);
r0.l = lo(AMBCTL1VAL);
[p2] = r0;
ssync;

p2.h = hi(EBIU_AMBCTL0);
p2.l = lo(EBIU_AMBCTL0);
r0.h = hi(AMBCTL0VAL);
r0.l = lo(AMBCTL0VAL);
[p2] = r0;
ssync;

p2.h = hi(EBIU_AMGCTL);
p2.l = lo(EBIU_AMGCTL);
r0 = AMGCTLVAL;
w[p2] = r0;
ssync;
call _real_start;

/* This section keeps the processor in supervisor mode
* during kernel boot. Switches to user mode at end of boot.
* See page 3-9 of Hardware Reference manual for documentation.
*/

/* EVT15 = _real_start */

p0.l = lo(EVT15);
p0.h = hi(EVT15);
p1.l = _real_start;
p1.h = _real_start;
[p0] = p1;
csync;

p0.l = lo(IMASK);
p0.h = hi(IMASK);
p1.l = IMASK_IVG15;
p1.h = 0x0;
[p0] = p1;
csync;

raise 15;
p0.l = WAIT_HERE;
p0.h = WAIT_HERE;
reti = p0;
rti;
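
/----------------------------------
What the sequence above does: EVT15 is pointed at _real_start and IMASK
is set to allow IVG15, then "raise 15" latches IVG15 as pending. RETI
is loaded with WAIT_HERE and "rti" returns from the startup event; the
pending IVG15 is then serviced immediately, so _real_start runs in
supervisor mode at the lowest interrupt priority (IVG15), where the
kernel stays. WAIT_HERE would only be reached if IVG15 ever returned.
-----------------------------------/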

WAIT_HERE:
jump WAIT_HERE;

ENTRY(_real_start)
[ -- sp ] = reti;
p0.l = lo(WDOG_CTL);
p0.h = hi(WDOG_CTL);
r0 = 0xAD6(z);
w[p0] = r0; /* watchdog off for now */
ssync;

/* Code update for BSS size == 0
* Zero out the bss region.
*/

p1.l = ___bss_start;
p1.h = ___bss_start;
p2.l = ___bss_stop;
p2.h = ___bss_stop;
r0 = 0;
p2 -= p1;
lsetup (_clear_bss, _clear_bss ) lc0 = p2;
_clear_bss:
B[p1++] = r0;
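
/-----------------------------------
Rough C equivalent of the zero-overhead loop above (a sketch):

	memset(___bss_start, 0, ___bss_stop - ___bss_start);

lsetup programs lc0 with the byte count (p2 = ___bss_stop - ___bss_start)
and the single-instruction loop body stores one zero byte per iteration.
------------------------------------/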

/* In case there is a NULL pointer reference
* Zero out region before stext
*/

p1.l = 0x0;
p1.h = 0x0;
r0.l = __stext;
r0.h = __stext;
r0 = r0 >> 1;
p2 = r0;
r0 = 0;
lsetup (_clear_zero, _clear_zero ) lc0 = p2;
_clear_zero:
W[p1++] = r0;

/* pass the uboot arguments to the global value command line */
R0 = R7;
call _cmdline_init;

p1.l = __rambase;
p1.h = __rambase;
r0.l = __sdata;
r0.h = __sdata;
[p1] = r0;

p1.l = __ramstart;
p1.h = __ramstart;
p3.l = ___bss_stop;
p3.h = ___bss_stop;

r1 = p3;
[p1] = r1;

r0.l = lo(RAM_END);
r0.h = hi(RAM_END);
p1.l = __ramend;
p1.h = __ramend;
[p1] = r0;

/*
* load the current thread pointer and stack
*/
r1.l = _init_thread_union;
r1.h = _init_thread_union;

r2.l = 0x2000;
r2.h = 0x0000;
r1 = r1 + r2;
sp = r1;
usp = sp;
fp = sp;
call _start_kernel;
_exit:
jump.s _exit;

xenomai initialization - resend

modules.c:

int __init __xeno_sys_init(void)

include/asm-blackfin/system.h:

static inline int xnarch_init (void)

{
int err;

/* Register the irq_tail_hook - do rescheduling */

__ipipe_irq_tail_hook = (unsigned long)&xnpod_schedule_deferred;

/* Register Xenomai domain */
err = rthal_init();

if (err)
return err;

#ifdef CONFIG_SMP
/* The HAL layer also sets the same CPU affinity so that both
modules keep their execution sequence on SMP boxen. */
set_cpus_allowed(current,cpumask_of_cpu(0));
#endif /* CONFIG_SMP */

err = xnarch_calibrate_sched();

if (err)
return err;

xnarch_escalation_virq = rthal_alloc_virq();

if (xnarch_escalation_virq == 0)
return -ENOSYS;

rthal_virtualize_irq(&rthal_domain,
xnarch_escalation_virq,
(rthal_irq_handler_t)&xnpod_schedule_handler,
NULL,
NULL,
IPIPE_HANDLE_MASK | IPIPE_WIRED_MASK);

xnarch_old_trap_handler = rthal_trap_catch(&xnarch_trap_fault);

return 0;
}
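
A minimal sketch of the virtual-IRQ mechanism set up above, using the
rthal_* wrappers shown here plus ipipe_trigger_irq() (mentioned in the
first post); my_virq_handler is only illustrative. Once the virq is
virtualized in a domain, any context can post it, and its handler runs
when that domain's stage is synced:

	unsigned virq = rthal_alloc_virq();

	rthal_virtualize_irq(&rthal_domain, virq,
			     (rthal_irq_handler_t)&my_virq_handler,
			     NULL, NULL,
			     IPIPE_HANDLE_MASK);

	ipipe_trigger_irq(virq);	/* enters __ipipe_handle_irq() with
					 * regs == NULL, see above */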
