Monday, September 11, 2006

switch between primary and secondary mode - 1

include/linux/sched.h

struct task_struct {
    ... ...
#ifdef CONFIG_IPIPE
    void *ptd[IPIPE_ROOT_NPTDKEYS];
#endif /* CONFIG_IPIPE */
};

include/asm-generic/syscall.h
extern int nkgkptd;

#define xnshadow_ptd(t) ((t)->ptd[nkgkptd])
#define xnshadow_thread(t) ((xnthread_t *)xnshadow_ptd(t))
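
The ptd[] slot indexed by nkgkptd is the hook that lets any kernel-side code recover the Xenomai shadow attached to a Linux task_struct. A minimal sketch of how this is typically used (the helper name below is made up for illustration):

/* Hypothetical helper: does this Linux task carry a Xenomai shadow? */
static inline int task_has_shadow(struct task_struct *p)
{
    /* xnshadow_ptd(p) expands to p->ptd[nkgkptd]; it is NULL for a
       plain Linux task and points to the xnthread_t for a shadow. */
    return xnshadow_thread(p) != NULL;
}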

ksrc/nucleus/shadow.c

/* Create RT context for "current" */

int xnshadow_map (xnthread_t *thread,
                  xncompletion_t __user *u_completion)
{
    xnarch_cpumask_t affinity;
    unsigned muxid, magic;
    int mode, prio;

    prio = xnthread_base_priority(thread);

    if (prio < 1 || prio >= MAX_RT_PRIO)
        return -EINVAL;

    /* Increment the interface reference count. */
    magic = xnthread_get_magic(thread);

    for (muxid = 0; muxid < XENOMAI_MUX_NR; muxid++)
    {
        if (muxtable[muxid].magic == magic)
        {
            xnarch_atomic_inc(&muxtable[muxid].refcnt);
            break;
        }
    }

    xnltt_log_event(xeno_ev_shadowmap,
                    thread->name,
                    current->pid,
                    prio);

    current->cap_effective |=
        CAP_TO_MASK(CAP_IPC_LOCK)|
        CAP_TO_MASK(CAP_SYS_RAWIO)|
        CAP_TO_MASK(CAP_SYS_NICE);

    xnarch_init_shadow_tcb(xnthread_archtcb(thread), thread,
                           xnthread_name(thread));
    set_linux_task_priority(current, prio);
    xnshadow_ptd(current) = thread;
/--------------------------------------------------------------------
  i.e. current->ptd[nkgkptd] = thread -- the Linux task now points
  back to its Xenomai shadow.
---------------------------------------------------------------------/
    xnpod_suspend_thread(thread, XNRELAX, XN_INFINITE, NULL);

    if (u_completion)
    {
        xnshadow_signal_completion(u_completion, 0);
        return 0;
    }

    /* Nobody waits for us, so we may start the shadow immediately
       after having forced the CPU affinity to the current
       processor. Note that we don't use smp_processor_id() to prevent
       kernel debug stuff to yell at us for calling it in a preemptible
       section of code. */

    affinity = xnarch_cpumask_of_cpu(rthal_processor_id());
    set_cpus_allowed(current, affinity);

    mode = thread->rrperiod != XN_INFINITE ? XNRRB : 0;
    xnpod_start_thread(thread, mode, 0, affinity, NULL, NULL);

    return xnshadow_harden();
}
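
To see where xnshadow_map() fits, here is a rough sketch of a skin-level "create task" syscall handler; it runs in secondary mode (root domain) on behalf of the user thread being mapped. Apart from xnshadow_map() itself, every name below is a placeholder, not the actual native-skin code:

/* Sketch only -- placeholder names, not the real skin handler. */
static int __sketch_task_create(xnthread_t *thread,
                                xncompletion_t __user *u_completion)
{
    /* ... the skin initializes 'thread' (priority, name, stack) here ... */

    /* Map the calling Linux task onto 'thread'. If u_completion is
       non-NULL, a parent waits for us and we stay relaxed; if it is
       NULL, xnshadow_map() starts the thread and hardens it at once
       via xnshadow_harden(). */
    return xnshadow_map(thread, u_completion);
}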

/-----------------------------------------------------------
  Gatekeeper kernel thread - one thread per CPU.
-------------------------------------------------------------/

static int gatekeeper_thread (void *data)
{
    struct __gatekeeper *gk = (struct __gatekeeper *)data;
    struct task_struct *this_task = current;
    DECLARE_WAITQUEUE(wait, this_task);
    int cpu = gk - &gatekeeper[0];
    xnthread_t *thread;
    cpumask_t cpumask;
    spl_t s;

    sigfillset(&this_task->blocked);
    cpumask = cpumask_of_cpu(cpu);
    set_cpus_allowed(this_task, cpumask);
    set_linux_task_priority(this_task, MAX_RT_PRIO - 1);

    init_waitqueue_head(&gk->waitq);
    add_wait_queue_exclusive(&gk->waitq, &wait);

    up(&gk->sync); /* Sync with xnshadow_mount(). */

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        up(&gk->sync); /* Make the request token available. */
        schedule();

        if (kthread_should_stop())
            break;

        xnlock_get_irqsave(&nklock, s);

        thread = gk->thread;

        /* In the very rare case where the requestor has been awaken
           by a signal before we have been able to process the
           pending request, just ignore the latter. */

        if (xnthread_user_task(thread)->state == TASK_INTERRUPTIBLE) {
#ifdef CONFIG_SMP
            /* If the task changed its CPU while in secondary mode,
               change the CPU of the underlying Xenomai shadow too. We
               do not migrate the thread timers here, it would not
               work. For a "full" migration comprising timers, using
               xnpod_migrate_thread is required. */
            thread->sched = xnpod_sched_slot(cpu);
#endif /* CONFIG_SMP */
            xnpod_resume_thread(thread, XNRELAX);
            xnpod_renice_root(XNPOD_ROOT_PRIO_BASE);
            xnpod_schedule();
        }

        xnlock_put_irqrestore(&nklock, s);
    }

    return 0;
}
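
The gatekeeper above is only one half of the hand-off. The requesting side, xnshadow_harden(), does roughly the mirror image: it takes the per-CPU request token, publishes its shadow in gk->thread, wakes the gatekeeper and schedules itself out, so that the gatekeeper can resume the shadow in primary mode. A simplified sketch (error handling and checks omitted, so treat it as an approximation of the real shadow.c code, not a verbatim quote):

static int xnshadow_harden_sketch(void) /* approximation of xnshadow_harden() */
{
    struct task_struct *this_task = current;
    struct __gatekeeper *gk = &gatekeeper[task_cpu(this_task)];
    xnthread_t *thread = xnshadow_thread(this_task);

    if (!thread)
        return -EPERM;

    if (down_interruptible(&gk->sync)) /* Take the request token. */
        return -ERESTARTSYS;

    gk->thread = thread;                    /* Publish the request. */
    set_current_state(TASK_INTERRUPTIBLE);
    wake_up_interruptible_sync(&gk->waitq); /* Kick the gatekeeper. */
    schedule();  /* Sleep as a Linux task; the gatekeeper then resumes
                    the shadow, so execution continues past this point
                    under the Xenomai scheduler, i.e. in primary mode. */
    return 0;
}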

Thursday, September 07, 2006

FW: system call - cont 2


/* __ipipe_dispatch_event() -- Low-level event dispatcher. */

int fastcall __ipipe_dispatch_event (unsigned event, void *data)
{
    struct ipipe_domain *start_domain, *this_domain, *next_domain;
    struct list_head *pos, *npos;
    unsigned long flags;
    ipipe_declare_cpuid;
    int propagate = 1;

    ipipe_lock_cpu(flags);

    start_domain = this_domain = ipipe_percpu_domain[cpuid];

    list_for_each_safe(pos, npos, &__ipipe_pipeline) {

        next_domain = list_entry(pos, struct ipipe_domain, p_link);

        /*
         * Note: Domain migration may occur while running
         * event or interrupt handlers, in which case the
         * current register set is going to be recycled for a
         * different domain than the initiating one. We do
         * care for that, always tracking the current domain
         * descriptor upon return from those handlers.
         */
        if (next_domain->evhand[event] != NULL) {
            ipipe_percpu_domain[cpuid] = next_domain;
            ipipe_unlock_cpu(flags);
            propagate =
                !next_domain->evhand[event](event, start_domain, data);
            ipipe_lock_cpu(flags);
            if (ipipe_percpu_domain[cpuid] != next_domain)
                this_domain = ipipe_percpu_domain[cpuid];
        }

        if (next_domain != ipipe_root_domain && /* NEVER sync the root stage here. */
            next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
            !test_bit(IPIPE_STALL_FLAG, &next_domain->cpudata[cpuid].status)) {
            ipipe_percpu_domain[cpuid] = next_domain;
            __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
            ipipe_load_cpuid();
            if (ipipe_percpu_domain[cpuid] != next_domain)
                this_domain = ipipe_percpu_domain[cpuid];
        }

        ipipe_percpu_domain[cpuid] = this_domain;

        if (next_domain == this_domain || !propagate)
            break;
    }

    ipipe_unlock_cpu(flags);

    return !propagate;
}

ksrc/nucleus/shadow.c

static inline int do_losyscall_event (unsigned event, unsigned domid,
                                      void *data) ---------- for the Linux (ipipe_root_domain) stage

static inline int do_hisyscall_event (unsigned event, unsigned domid,
                                      void *data) ---------- for the Xenomai domain
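
These two handlers only matter once they are installed on the SYSCALL event of their respective domains, so that __ipipe_dispatch_event() finds them in evhand[]. Conceptually, the registration performed when the nucleus mounts looks like the sketch below; the wrapper function and handler symbol names are assumptions, not a verbatim quote from shadow.c:

/* Sketch: install the syscall event handlers (names are illustrative). */
static void __sketch_catch_syscalls(void)
{
    /* Root (Linux) domain: syscalls issued from secondary mode. */
    ipipe_catch_event(ipipe_root_domain, IPIPE_EVENT_SYSCALL,
                      &losyscall_event);
    /* Xenomai domain: syscalls issued from primary mode. */
    ipipe_catch_event(&rthal_domain, IPIPE_EVENT_SYSCALL,
                      &hisyscall_event);
}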

__ipipe_walk_pipeline


/* __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
   be called with local hw interrupts disabled. */

void fastcall __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
{
    struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];

    while (pos != &__ipipe_pipeline) {
        struct ipipe_domain *next_domain =
            list_entry(pos, struct ipipe_domain, p_link);

        if (test_bit(IPIPE_STALL_FLAG,
                     &next_domain->cpudata[cpuid].status))
            break; /* Stalled stage -- do not go further. */

        if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {

            if (next_domain == this_domain)
                __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
            else {
                __ipipe_switch_to(this_domain, next_domain, cpuid);

                ipipe_load_cpuid(); /* Processor might have changed. */

                if (this_domain->cpudata[cpuid].irq_pending_hi != 0
                    && !test_bit(IPIPE_STALL_FLAG,
                                 &this_domain->cpudata[cpuid].status))
                    __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
            }

            break;
        } else if (next_domain == this_domain)
            break;

        pos = next_domain->p_link.next;
    }
}

/* Called with hw interrupts off. */
static inline void __ipipe_switch_to(struct ipipe_domain *out,
                                     struct ipipe_domain *in, int cpuid)
{
    void ipipe_suspend_domain(void);

/------------------------------------------------------------------------
  Strange statement here - this is only a local forward declaration of
  ipipe_suspend_domain(); the declaration itself does not call it.
------------------------------------------------------------------------/

    /*
     * "in" is guaranteed to be closer than "out" from the head of the
     * pipeline (and obviously different).
     */

    ipipe_percpu_domain[cpuid] = in;

    ipipe_suspend_domain(); /* Sync stage and propagate interrupts. */
    ipipe_load_cpuid();     /* Processor might have changed. */

    if (ipipe_percpu_domain[cpuid] == in)
        /*
         * Otherwise, something has changed the current domain under
         * our feet recycling the register set; do not override.
         */
        ipipe_percpu_domain[cpuid] = out;
}
