Monday, July 31, 2006

rt_task_set_period

1. rt_task_set_period()

2. xnpod_set_thread_periodic()

3. xntimer_start()

4. nktimer->do_timer_start()

5. xntimer_do_start_aperiodic()

6. xntimer_next_local_shot()

---------------------------------

1. rt_task_wait_period()

2. xnpod_wait_thread_period()
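
For context, a minimal sketch of how these two chains get exercised from
the native skin. The task body and the 1 ms period are illustrative
assumptions, not from the notes above; signatures follow the Xenomai 2.x
native API:

#include <native/task.h>
#include <native/timer.h>

/* Illustrative periodic loop: rt_task_set_period() walks chain 1
 * above down to programming the one-shot core timer;
 * rt_task_wait_period() walks chain 2 and blocks until the next
 * release point. NULL task = the calling task; the period is in
 * nanoseconds, assuming an aperiodic (oneshot) time base. */
void periodic_body(void *arg)
{
    rt_task_set_period(NULL, TM_NOW, 1000000);  /* 1 ms */

    for (;;) {
        if (rt_task_wait_period(NULL))
            break;              /* overrun or timer error */
        /* ... periodic work here ... */
    }
}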

-----------------------------
ksrc/nucleus/timer.c
-----------------------------
xntimer_do_tick_aperiodic()

----------------------------
ksrc/nucleus/timer.c
----------------------------
xntimer_next_local_shot()

----------------------------
include/asm-blackfin/system.h
-----------------------------
xnarch_program_timer_shot()

----------------------------
include/asm-blackfin/hal.h:
----------------------------
rthal_timer_program_shot():
static inline void rthal_timer_program_shot(unsigned long delay)
{
    if (delay < 2)
        delay = 10;
    *pTCOUNT = delay - 1;
    __builtin_bfin_csync();
    *pTCNTL = 3;    /* Oneshot mode, no auto-reload. */
    __builtin_bfin_csync();
}
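
TCOUNT is loaded in core-timer counts, so the caller
(xnarch_program_timer_shot() above) has to convert the nucleus'
nanosecond delay into counts first. A hedged sketch of that conversion;
the helper name and frequency parameter are illustrative, not the actual
HAL symbols:

/* Convert a nanosecond delay into core-timer counts; 64-bit
 * arithmetic avoids overflow for large delays. */
static inline unsigned long ns_to_tcount(unsigned long long ns,
                                         unsigned long timer_hz)
{
    return (unsigned long)((ns * timer_hz) / 1000000000ULL);
}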

Tuesday, July 11, 2006

Joke from Forum

By: Robin Getz
RE: machine voice [ reply ]
2006-07-11 19:16
Nesta:

I am still puzzled:

- you are running what video file? What does "running" mean - decoding,
playing? What video format?

- you are listening to the video signal? Or you are decoding an A/V
(Audio/Visual) stream and the audio is not decoding properly?

If you are listening to the video signal - like having the speakers
connected to the video output - are you looking for ghosts? If so,
this book may help:

http://www.amazon.com/gp/product/0822325721/102-8326188-0684925

-Robin

By: nesta alssandro
RE: machine voice [ reply ]
2006-07-11 15:13
I tried to run a video file on my kit, but the sound of the video comes
out like a machine voice and I don't know why.

By: Robin Getz
RE: machine voice [ reply ]
2006-07-11 14:31
Nesta:

I am not sure what the question is - you turn the Blackfin on, and you
start hearing voices?

:)

-Robin

By: nesta alssandro
machine voice [ reply ]
2006-07-11 12:49
hi all
does anyone know what causes the audio to sound like a machine
voice?

is it related to the kit or what?

Thursday, July 06, 2006

irq thread

arch/blackfin/kernel/irqchip.c

do_irqd() {

    while (!kthread_should_stop()) {
        if (!down_interruptible(&desc->thrsem)) {
            local_irq_disable();
            desc->thrhandler(irq, &__ipipe_tick_regs[cpu]);
            local_irq_enable();
        }
    }

}

/----------------------
Note: the semaphore here acts as a counter, so multiple interrupts will
not be lost.
-----------------------/
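
A minimal user-space analogue of that counting behaviour, with a POSIX
semaphore standing in for desc->thrsem (everything here is
illustrative):

#include <semaphore.h>
#include <stdio.h>

static sem_t thrsem;

/* Stand-in for the hard IRQ entry: each "interrupt" posts once,
 * bumping the semaphore count even if the thread is not running. */
static void fake_irq(void)
{
    sem_post(&thrsem);
}

int main(void)
{
    sem_init(&thrsem, 0, 0);

    /* Three interrupts arrive before the handler thread runs... */
    fake_irq();
    fake_irq();
    fake_irq();

    /* ...and the drain loop still sees all three, because the count
     * remembers every post - nothing is lost. */
    int handled = 0;
    while (sem_trywait(&thrsem) == 0)
        handled++;

    printf("handled %d interrupts\n", handled);
    return 0;
}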

ENTRY(return_from_int)
/* If someone else already raised IRQ 15, do nothing. */
csync;
p2.l = lo(ILAT);
p2.h = hi(ILAT);
r0 = [p2];
cc = bittst (r0, EVT_IVG15_P);
if cc jump 2f;

/------------------
Check for IRQ 15
-------------------/

/* if not return to user mode, get out */
p2.l = lo(IPEND);
p2.h = hi(IPEND);
r0 = [p2];
r1 = 0x17(Z);
r2 = ~r1;
r2.h = 0;
/--------
r2.h = 0, r2.l =~(0x17)
---------/
r0 = r2 & r0;
r1 = 1;
r1 = r0 - r1;
r2 = r0 & r1;
/------------
Don't care about EMU, RST, NMI, Global Interrupt Disable
-------------/
cc = r2 == 0;
if !cc jump 2f;
/--------------------
r0 & (r0 - 1) is non-zero when more than one IPEND bit remains set,
i.e. we are in a nested interrupt; in that case jump to 2f.
---------------------/

/* Lower the interrupt level to 15. */
p0.l = lo(EVT15);
p0.h = hi(EVT15);
p1.l = schedule_and_signal_from_int;
p1.h = schedule_and_signal_from_int;
[p0] = p1;
csync;
r0 = 0x801f (z);
STI r0;
raise 15; /* raise evt15 to do signal or reschedule */
rti;
2:
rts;
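
The r0 & (r0 - 1) sequence above is the usual "more than one bit set?"
trick; a small self-contained C version (names illustrative):

#include <stdio.h>

/* Clearing the lowest set bit (x & (x - 1)) leaves something behind
 * only if another bit was set, i.e. only if the interrupt is nested. */
static int nested(unsigned int ipend)
{
    return (ipend & (ipend - 1)) != 0;
}

int main(void)
{
    printf("%d %d %d\n", nested(0x20), nested(0x30), nested(0));
    /* prints "0 1 0": one active level, two nested levels, none */
    return 0;
}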


/*
* __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
* interrupt protection log is maintained here for each domain. Hw
* interrupts are masked on entry.
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
/--------------------
irq: starts from 7

This function can be called from:

__ipipe_grab_irq() - HW
ipipe_trigger_irq() - SW
---------------------/

    struct ipipe_domain *this_domain;
    struct list_head *head, *pos;
    int m_ack, s_ack, s = -1;
    ipipe_declare_cpuid;

    m_ack = (regs == NULL);    /* Software-triggered IRQs do not need
                                * any ack. */

/--------------------

A software-triggered IRQ has no regs.
See:
ipipe-core.c: ipipe_trigger_irq(): __ipipe_handle_irq(irq, NULL);
---------------------/


    ipipe_load_cpuid();

    this_domain = ipipe_percpu_domain[cpuid];
/-------------------------

struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
    {[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };

What does "ipipe_percpu_domain[cpuid]" stand for? It points to the
domain (pipeline stage) this CPU is currently running in; at boot that
is the root (Linux) domain, as the initializer shows.

--------------------------/

    s_ack = m_ack;

    if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
        head = &this_domain->p_link;
    else
        head = __ipipe_pipeline.next;

/----------------------------
__ipipe_pipeline: global list of domains, in priority order.
Does head start from this_domain or from the next domain? Normally from
__ipipe_pipeline.next, which is the first (highest-priority) entry of
the global list; only sticky IRQs start the walk at the current domain.
-----------------------------/

    /* Ack the interrupt. */

    pos = head;

    while (pos != &__ipipe_pipeline) {
        struct ipipe_domain *next_domain =
            list_entry(pos, struct ipipe_domain, p_link);

        /*
         * For each domain handling the incoming IRQ, mark it as
         * pending in its log.
         */
        if (test_bit(IPIPE_HANDLE_FLAG,
                     &next_domain->irqs[irq].control)) {
            /*
             * Domains that handle this IRQ are polled for
             * acknowledging it by decreasing priority order. The
             * interrupt must be made pending _first_ in the
             * domain's status flags before the PIC is unlocked.
             */
            next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
            next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
            __ipipe_set_irq_bit(next_domain, cpuid, irq);
/--------------------

struct ipcpudata {
    unsigned long status;
    unsigned long irq_pending_hi;
    unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
    struct ipirqcnt {
        unsigned long pending_hits;
        unsigned long total_hits;
    } irq_counters[IPIPE_NR_IRQS];
} ____cacheline_aligned_in_smp cpudata[IPIPE_NR_CPUS];

#define __ipipe_set_irq_bit(ipd, cpuid, irq) \
    do { \
        if (!test_bit(IPIPE_LOCK_FLAG, &(ipd)->irqs[irq].control)) { \
            __set_bit(irq & IPIPE_IRQ_IMASK, \
                      &(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
            __set_bit(irq >> IPIPE_IRQ_ISHIFT, \
                      &(ipd)->cpudata[cpuid].irq_pending_hi); \
        } \
    } while (0)

The encoding is two-level: if bit a of irq_pending_hi is set, word a of
irq_pending_lo has at least one bit set, and each set bit b in that word
means irq = a * 32 + b is pending.

--------------------/
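
A small sketch of decoding a pending IRQ back out of this two-level
bitmap, mirroring what __ipipe_sync_stage() does further down
(self-contained, 32 IRQs per word assumed):

#include <stdio.h>
#include <strings.h>            /* ffs() */

/* Lowest pending irq encoded in the two-level bitmap, or -1 if none:
 * hi has one bit per 32-irq word, lo[] holds the words themselves. */
static int next_pending_irq(unsigned long hi, const unsigned long *lo)
{
    if (hi == 0)
        return -1;
    int level = ffs((int)hi) - 1;           /* which 32-irq word */
    int rank = ffs((int)lo[level]) - 1;     /* which irq inside it */
    return level * 32 + rank;               /* irq = a * 32 + b */
}

int main(void)
{
    unsigned long lo[2] = { 0, 1UL << 6 };  /* irq 38 pending */
    printf("%d\n", next_pending_irq(1UL << 1, lo)); /* prints 38 */
    return 0;
}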



            ipipe_mark_irq_receipt(next_domain, irq, cpuid);

/---------
Only meaningful when CONFIG_IPIPE_STATS is enabled.
-------------/


            /*
             * Always get the first master acknowledge available.
             * Once we've got it, allow slave acknowledge
             * handlers to run (until one of them stops us).
             */
            if (next_domain->irqs[irq].acknowledge != NULL) {
                if (!m_ack)
                    m_ack = next_domain->irqs[irq].acknowledge(irq);
                else if (test_bit(IPIPE_SHARED_FLAG,
                                  &next_domain->irqs[irq].control) && !s_ack)
                    s_ack = next_domain->irqs[irq].acknowledge(irq);
            }
        }


/----------------------
What are m_ack and s_ack used for? They record whether the master
(resp. shared "slave") acknowledge handler has already run, so the PIC
is acknowledged at most once while shared-IRQ domains still get their
ack callback.
------------------------/

        /*
         * If the domain does not want the IRQ to be passed down the
         * interrupt pipe, exit the loop now.
         */
        if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
            break;

        pos = next_domain->p_link.next;
    }

    /*
     * Now walk the pipeline, yielding control to the highest
     * priority domain that has pending interrupt(s) or
     * immediately to the current domain if the interrupt has been
     * marked as 'sticky'. This search does not go beyond the
     * current domain in the pipeline. We also enforce the
     * additional root stage lock (blackfin-specific).
     */
    if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
        s = __test_and_set_bit(IPIPE_STALL_FLAG,
                               &ipipe_root_domain->cpudata[cpuid].status);

    __ipipe_walk_pipeline(head, cpuid);

    if (!s)
        __clear_bit(IPIPE_STALL_FLAG,
                    &ipipe_root_domain->cpudata[cpuid].status);
}


kernel/ipipe-core.c:

/* __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
be called with local hw interrupts disabled. */

void fastcall __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
{
    struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];

    while (pos != &__ipipe_pipeline) {
        struct ipipe_domain *next_domain =
            list_entry(pos, struct ipipe_domain, p_link);

        if (test_bit(IPIPE_STALL_FLAG,
                     &next_domain->cpudata[cpuid].status))
            break;    /* Stalled stage -- do not go further. */

        if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {

            if (next_domain == this_domain)
                __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
            else {
                __ipipe_switch_to(this_domain, next_domain,
                                  cpuid);

                ipipe_load_cpuid();    /* Processor might have changed. */

                if (this_domain->cpudata[cpuid].irq_pending_hi != 0
                    && !test_bit(IPIPE_STALL_FLAG,
                                 &this_domain->cpudata[cpuid].status))
                    __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
            }

            break;
        } else if (next_domain == this_domain)
            break;

        pos = next_domain->p_link.next;
    }
}



kernel/ipipe-core.c

/*
* __ipipe_sync_stage() -- Flush the pending IRQs for the current
* domain (and processor). This routine flushes the interrupt log
* (see "Optimistic interrupt protection" from D. Stodolsky et al. for
* more on the deferred interrupt scheme). Every interrupt that
* occurred while the pipeline was stalled gets played. WARNING:
* callers on SMP boxen should always check for CPU migration on
* return of this routine. One can control the kind of interrupts
* which are going to be sync'ed using the syncmask
* parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
* plays virtual interrupts only. This routine must be called with hw
* interrupts off.
*/
void __ipipe_sync_stage(unsigned long syncmask)
{
    unsigned long mask, submask;
    struct ipcpudata *cpudata;
    struct ipipe_domain *ipd;
    ipipe_declare_cpuid;
    int level, rank;
    unsigned irq;

    ipipe_load_cpuid();
    ipd = ipipe_percpu_domain[cpuid];
    cpudata = &ipd->cpudata[cpuid];

    if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
        return;
/------------
Has sync_stage already been entered on this CPU? If so, return, so the
log is not flushed recursively.
-------------/

    /*
     * The policy here is to keep the dispatching code interrupt-free
     * by stalling the current stage. If the upper domain handler
     * (which we call) wants to re-enable interrupts while in a safe
     * portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
     * sigaction()), it will have to unstall (then stall again before
     * returning to us!) the stage when it sees fit.
     */
    while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
        level = ffs(mask) - 1;
        __clear_bit(level, &cpudata->irq_pending_hi);

        while ((submask = cpudata->irq_pending_lo[level]) != 0) {

            if (ipd == ipipe_root_domain &&
                test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags)) {
                __set_bit(level, &cpudata->irq_pending_hi);
                goto done;
            }

            rank = ffs(submask) - 1;
            irq = (level << IPIPE_IRQ_ISHIFT) + rank;

            if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) {
                __clear_bit(rank,
                            &cpudata->irq_pending_lo[level]);
                continue;
            }

            if (--cpudata->irq_counters[irq].pending_hits == 0) {
                __clear_bit(rank,
                            &cpudata->irq_pending_lo[level]);
                ipipe_mark_irq_delivery(ipd, irq, cpuid);
            }

            __set_bit(IPIPE_STALL_FLAG, &cpudata->status);
            ipipe_mark_domain_stall(ipd, cpuid);

            if (ipd == ipipe_root_domain) {
                /*
                 * Note: the I-pipe implements a
                 * threaded interrupt model on this
                 * arch for Linux external IRQs. The
                 * interrupt handler we call here only
                 * wakes up the associated IRQ thread.
                 */
                if (ipipe_virtual_irq_p(irq)) {

/----------------
Is this a virtual irq?

-----------------/
                    /* No irqtail here; virtual interrupts have
                       no effect on IPEND so there is no need for
                       processing deferral. */
                    local_irq_enable_hw();
                    ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
                    local_irq_disable_hw();
                } else
                    /* No need to run the irqtail here either; we are not
                       preemptable by hw IRQs, so non-Linux IRQs cannot
                       stack over the short thread wakeup code. Which in turn
                       means that no irqtail condition could be pending
                       for domains above Linux in the pipeline. */
                    ((void (*)(unsigned, struct pt_regs *))
                     ipd->irqs[irq].handler)(irq, __ipipe_tick_regs + cpuid);
            } else {
                __clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
                ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
                /* Attempt to exit the outer interrupt
                 * level before starting the deferred
                 * IRQ processing. */
                __ipipe_run_irqtail();
/-----------------
See __ipipe_run_irqtail() below.
-----------------/


                __set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
            }
#ifdef CONFIG_SMP
            {
                int _cpuid = ipipe_processor_id();

                if (_cpuid != cpuid) {    /* Handle CPU migration. */
                    /* We expect any domain to clear the SYNC bit each
                       time it switches in a new task, so that preemptions
                       and/or CPU migrations (in the SMP case) over the
                       ISR do not lock out the log syncer for some
                       indefinite amount of time. In the Linux case,
                       schedule() handles this (see kernel/sched.c). For
                       this reason, we don't bother clearing it here for
                       the source CPU in the migration handling case,
                       since it must have scheduled another task in by
                       now. */
                    cpuid = _cpuid;
                    cpudata = &ipd->cpudata[cpuid];
                    __set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
                }
            }
#endif /* CONFIG_SMP */
            __clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
            ipipe_mark_domain_unstall(ipd, cpuid);
        }
    }

done:
    __clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
}


static inline void __ipipe_run_irqtail(void)
{
    asmlinkage void __ipipe_call_irqtail(void);
    unsigned long pending;

    __builtin_bfin_csync();

    pending = *pIPEND;
    if (pending & 0x8000) {
        pending &= ~0x8010;
        if (pending && (pending & (pending - 1)) == 0)
            __ipipe_call_irqtail();
/--------------
See ___ipipe_call_irqtail below. 0x8000 tests whether IVG15 is active;
after masking out IVG15 and bit 4 (0x8010), the irqtail is called only
when exactly one interrupt level remains pending - the same single-bit
trick as in return_from_int.
---------------/
    }
}


ENTRY(___ipipe_call_irqtail)

r0.l = 1f;
r0.h = 1f;
reti = r0;
rti
/-------
Return from interrupt; execution continues at 1: below.
---------/
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
p0.l = ___ipipe_irq_tail_hook;
p0.h = ___ipipe_irq_tail_hook;
p0 = [p0] ;
sp += -12 ;
call (p0) ;
sp += 12;
( r7:4, p5:3 ) = [sp++];
rets = [sp++];

/------------
Call ipipe_irq_tail_hook
------------/

[--sp] = reti;
reti = [sp++]; /* IRQs are off. */

/-----------
Save and reload RETI, keeping interrupt nesting disabled.
------------/

r0.h = 3f;
r0.l = 3f;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync;
r0 = 0x401f;
sti r0;
/----------------------------------
Enable interrupts: only IVG14 is unmasked (0x401f).
----------------------------------/

raise 14;
[--sp] = reti; /* IRQs on. */
2:
jump 2b /* Likely paranoid. */
3:
sp += 4; /* Discard saved RETI */
r0.h = _evt14_softirq;
r0.l = _evt14_softirq;
p0.l = lo(EVT14);
p0.h = hi(EVT14);
[p0] = r0;
csync
p0.l = _irq_flags;
p0.h = _irq_flags;
r0 = [p0];
sti r0
rts;

Wednesday, July 05, 2006

IPIPE - 1

1. Grab interrupts: interrupt.S

In __common_int_entry:

#ifdef CONFIG_IPIPE
call ___ipipe_grab_irq
SP += 12;
cc = r0 == 0;
if cc jump common_restore_context;
#else /* CONFIG_IPIPE */
call _do_irq;
SP += 12;
#endif /* CONFIG_IPIPE */
call return_from_int;
common_restore_context:
RESTORE_CONTEXT
rti;

/--------------------------------
Note: if __ipipe_grab_irq() returns 0 (the IRQ was not left to the
unstalled root domain), return_from_int is skipped and the context is
restored directly; only when r0 == 1 does the normal Linux return path
(signal delivery / reschedule) run.
---------------------------------/

2. asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
    struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
    struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
    unsigned long sic_status;
    ipipe_declare_cpuid;
    int irq;

/--------------------------------------------------------
struct ivgx {
    /* irq number for request_irq, available in mach-bf533/irq.h */
    int irqno;
    /* corresponding bit in the SIC_ISR register */
    int isrflag;
} ivg_table[NR_PERI_INTS];

struct ivg_slice {
    /* position of first irq in ivg_table for given ivg */
    struct ivgx *ifirst;
    struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];

ivg7_13 is a table storing the mapping from peripheral interrupts to
core interrupts IVG7-IVG13.

ivg_table contains the "IRQ NUM" and "position in SIC_ISR" of each
peripheral interrupt.
------------------------------------/

    if (likely(vec == EVT_IVTMR_P)) {
        irq = IRQ_CORETMR;
        goto handle_irq;
    }

/-----------------------------------------
The core timer interrupt is the most frequent case, hence the likely().
-----------------------------------------/

    __builtin_bfin_ssync();
    sic_status = *pSIC_IMASK & *pSIC_ISR;

/----------------------------------------
sic_status: system interrupts that have fired and are not masked.
------------------------------------------/

    for (;; ivg++) {
        if (ivg >= ivg_stop) {
            num_spurious++;
            return 0;
        } else if (sic_status & ivg->isrflag)
            break;
    }

/---------------------------------------
Find the system interrupt that triggered "vec".
----------------------------------------/

    irq = ivg->irqno;
/---------------
irq defined:

See irq.h - peripheral interrupt IRQ numbers start from 7, in the same
order as SIC_ISR.
----------------/


    ipipe_load_cpuid();

    if (irq == IRQ_SYSTMR) {
        *pTIMER_STATUS = 1;    /* Latch TIMIL0 */
        /* for update_process_times() */
        __ipipe_tick_regs[cpuid].ipend = regs->ipend;
    }
/--------------
IRQ_SYSTMR is a peripheral interrupt, while the core timer is a core
interrupt. Why does it need regs->ipend here? Presumably because the
handler runs later from the IRQ thread, so update_process_times() needs
a saved IPEND snapshot for its user_mode(regs) check.
-----------------/


handle_irq:

    __ipipe_handle_irq(irq, regs);

    return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
            !test_bit(IPIPE_STALL_FLAG,
                      &ipipe_root_domain->cpudata[cpuid].status));

/--------------
If this is the Linux (root) domain and Linux is not stalled, return 1.
That means the interrupt should be finished the same way stock Linux
would finish it (via return_from_int).
---------------/
}

Tuesday, July 04, 2006

IPIPE - 1

1. Grab interrupts: interrupt.S

In __common_int_entry:

#ifdef CONFIG_IPIPE
call ___ipipe_grab_irq
SP += 12;
cc = r0 == 0;
if cc jump common_restore_context;
#else /* CONFIG_IPIPE */
call _do_irq;
SP += 12;

Sunday, July 02, 2006


* Static priority:

[100, 139] - Nice: [-20, 19]

* Base Time Slice - depends on static priority (5 ms - 800 ms). By default 100 ms.

* Dynamic priority:

dynamic priority = max(100, min(static priority - bonus + 5, 139)) - this
is what is actually used to select the next process to run.

Bonus (0 - 10) depends on the average sleep time.

* The run queue data structure:
a. Per-CPU

struct runqueue {
    prio_array_t *active;
    prio_array_t *expired;
};

typedef struct prio_array {
    int nr_active;
    unsigned long bitmap[BITMAP_SIZE];    /* one bit per priority level */
    struct list_head queue[MAX_PRIO];     /* one list per priority level */
} prio_array_t;
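
A self-contained sketch of why this layout makes picking the next task
O(1): a bounded bit scan over the bitmap, then the head of that priority
list (field names follow the structs above, not the exact kernel code):

#include <stdio.h>
#include <strings.h>            /* ffs() */

#define MAX_PRIO 140
#define NWORDS ((MAX_PRIO + 31) / 32)

struct task { const char *name; };

struct prio_array {
    unsigned long bitmap[NWORDS];   /* one bit per priority level */
    struct task *queue[MAX_PRIO];   /* head of each priority list */
};

/* O(1) pick-next: find the first set bit (lowest index = highest
 * priority), then take the first task queued at that level. */
static struct task *pick_next(struct prio_array *a)
{
    for (int w = 0; w < NWORDS; w++)        /* at most 5 words */
        if (a->bitmap[w])
            return a->queue[w * 32 + ffs((int)a->bitmap[w]) - 1];
    return NULL;
}

int main(void)
{
    static struct task t = { "nice-0 task" };
    struct prio_array a = { { 0 }, { NULL } };

    a.queue[120] = &t;                      /* static prio 120 = nice 0 */
    a.bitmap[120 / 32] |= 1UL << (120 % 32);

    printf("%s\n", pick_next(&a)->name);    /* prints "nice-0 task" */
    return 0;
}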

* scheduler_tick() function:

Called on each timer tick; updates the time slice of the current task.

A task that runs out of its time slice is moved to the expired queue if
it is a normal process, or to the tail of its priority list in the
active queue if it is real-time (round-robin).
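
A hedged sketch of that per-tick bookkeeping - the slice values and
helpers are simplified stand-ins, not the kernel's exact code:

#include <stdio.h>

struct task { int time_slice, static_prio, need_resched; };

/* Stand-in for the base-slice rule (the real kernel scales 5-800 ms by
 * static priority; only the shape matters here). */
static int base_slice(int static_prio)
{
    return static_prio < 120 ? 800 : 100;
}

/* One timer tick charged to the running task: on expiry, refill the
 * slice and flag a reschedule; the actual requeue (expired array for
 * normal tasks, tail of the active list for RR real-time tasks) is
 * elided. */
static void scheduler_tick(struct task *cur)
{
    if (--cur->time_slice > 0)
        return;
    cur->time_slice = base_slice(cur->static_prio);
    cur->need_resched = 1;
}

int main(void)
{
    struct task t = { 2, 120, 0 };
    scheduler_tick(&t);
    scheduler_tick(&t);
    printf("slice=%d need_resched=%d\n", t.time_slice, t.need_resched);
    return 0;
}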

* schedule()

Selects the next task to run and does the context switch.

a) Direct invocation - when a task wants to sleep.
b) Lazy invocation - set the TIF_NEED_RESCHED flag of the current task;
the flag is checked when returning to user mode.

Blog Archive