Monday, September 11, 2006

switch between primary and secondary mode - 1

include/linux/sched.h

struct task_struct {
    ... ...
#ifdef CONFIG_IPIPE
    void *ptd[IPIPE_ROOT_NPTDKEYS];
#endif /* CONFIG_IPIPE */
};
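The ptd[] array is the per-task pointer storage added by the I-pipe patch: each domain can reserve a key (an index into the array) and hang its own per-task data off it. Below is a minimal sketch of that pattern, assuming ipipe_alloc_ptdkey() from the Adeos/I-pipe interface; the helper is illustrative and not part of the Xenomai sources.

/* Sketch only: the generic ptd usage pattern for a domain. */
static int my_key = -1;                     /* hypothetical per-domain key */

static int attach_private_data(struct task_struct *p, void *data)
{
    if (my_key < 0) {
        my_key = ipipe_alloc_ptdkey();      /* reserve a slot once */
        if (my_key < 0)
            return -EBUSY;
    }
    p->ptd[my_key] = data;                  /* private per-task pointer */
    return 0;
}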

include/asm-generic/syscall.h
extern int nkgkptd;

#define xnshadow_ptd(t) ((t)->ptd[nkgkptd])
#define xnshadow_thread(t) ((xnthread_t *)xnshadow_ptd(t))
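Xenomai reserves one such key for the nucleus and keeps its index in nkgkptd, so xnshadow_ptd(t) is simply t->ptd[nkgkptd]. Any nucleus code holding a struct task_struct pointer can therefore tell whether the task carries a shadow; a tiny illustrative check (the helper name is made up):

/* Illustrative only: a mapped task has a non-NULL shadow pointer in its
   ptd slot, a plain Linux task does not. */
static inline int is_shadow_task(struct task_struct *p)
{
    return xnshadow_thread(p) != NULL;  /* (xnthread_t *)p->ptd[nkgkptd] */
}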

ksrc/nucleus/shadow.c

/* Create RT context for "current" */

int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion)
{
    xnarch_cpumask_t affinity;
    unsigned muxid, magic;
    int mode, prio;

    prio = xnthread_base_priority(thread);

    if (prio < 1 || prio >= MAX_RT_PRIO)
        return -EINVAL;

    /* Increment the interface reference count. */
    magic = xnthread_get_magic(thread);

    for (muxid = 0; muxid < XENOMAI_MUX_NR; muxid++) {
        if (muxtable[muxid].magic == magic) {
            xnarch_atomic_inc(&muxtable[muxid].refcnt);
            break;
        }
    }

    xnltt_log_event(xeno_ev_shadowmap,
                    thread->name,
                    current->pid,
                    prio);

    /* Grant the capabilities a shadow needs (memory locking, raw I/O,
       priority tuning). */
    current->cap_effective |=
        CAP_TO_MASK(CAP_IPC_LOCK) |
        CAP_TO_MASK(CAP_SYS_RAWIO) |
        CAP_TO_MASK(CAP_SYS_NICE);

    xnarch_init_shadow_tcb(xnthread_archtcb(thread), thread,
                           xnthread_name(thread));
    set_linux_task_priority(current, prio);

    /* Attach the shadow to the calling Linux task, i.e.
       current->ptd[nkgkptd] = thread. */
    xnshadow_ptd(current) = thread;

    xnpod_suspend_thread(thread, XNRELAX, XN_INFINITE, NULL);

    if (u_completion) {
        xnshadow_signal_completion(u_completion, 0);
        return 0;
    }

    /* Nobody waits for us, so we may start the shadow immediately
       after having forced the CPU affinity to the current
       processor. Note that we don't use smp_processor_id() to prevent
       the kernel debug code from yelling at us for calling it in a
       preemptible section of code. */

    affinity = xnarch_cpumask_of_cpu(rthal_processor_id());
    set_cpus_allowed(current, affinity);

    mode = thread->rrperiod != XN_INFINITE ? XNRRB : 0;
    xnpod_start_thread(thread, mode, 0, affinity, NULL, NULL);

    return xnshadow_harden();
}
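xnshadow_map() always runs on behalf of current, typically from a skin's "create task" system call handler. The sketch below shows how a caller might use it; my_skin_create_thread() and init_my_thread() are made-up names standing in for the real skin code.

/* Sketch, not actual skin code: map the calling Linux task over a freshly
   initialized Xenomai thread. With a non-NULL completion the shadow stays
   relaxed until the creator releases it; with NULL it hardens right away
   through xnshadow_harden() as shown above. */
static int my_skin_create_thread(xnthread_t *thread,
                                 xncompletion_t __user *u_completion)
{
    int err = init_my_thread(thread); /* hypothetical: name, priority, ... */

    if (err)
        return err;

    return xnshadow_map(thread, u_completion);
}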

/* Gatekeeper kernel thread: one instance per CPU. */

static int gatekeeper_thread(void *data)
{
    struct __gatekeeper *gk = (struct __gatekeeper *)data;
    struct task_struct *this_task = current;
    DECLARE_WAITQUEUE(wait, this_task);
    int cpu = gk - &gatekeeper[0];
    xnthread_t *thread;
    cpumask_t cpumask;
    spl_t s;

    sigfillset(&this_task->blocked);
    cpumask = cpumask_of_cpu(cpu);
    set_cpus_allowed(this_task, cpumask);
    set_linux_task_priority(this_task, MAX_RT_PRIO - 1);

    init_waitqueue_head(&gk->waitq);
    add_wait_queue_exclusive(&gk->waitq, &wait);

    up(&gk->sync); /* Sync with xnshadow_mount(). */

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        up(&gk->sync); /* Make the request token available. */
        schedule();

        if (kthread_should_stop())
            break;

        xnlock_get_irqsave(&nklock, s);

        thread = gk->thread;

        /* In the very rare case where the requestor has been awakened
           by a signal before we have been able to process the
           pending request, just ignore the latter. */

        if (xnthread_user_task(thread)->state == TASK_INTERRUPTIBLE) {
#ifdef CONFIG_SMP
            /* If the task changed its CPU while in secondary mode,
               change the CPU of the underlying Xenomai shadow too. We
               do not migrate the thread timers here; it would not
               work. For a "full" migration comprising timers, using
               xnpod_migrate_thread is required. */
            thread->sched = xnpod_sched_slot(cpu);
#endif /* CONFIG_SMP */
            xnpod_resume_thread(thread, XNRELAX);
            xnpod_renice_root(XNPOD_ROOT_PRIO_BASE);
            xnpod_schedule();
        }

        xnlock_put_irqrestore(&nklock, s);
    }

    return 0;
}
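The counterpart of this loop is the requestor side in xnshadow_harden(): the relaxed shadow takes the request token released by up(&gk->sync), publishes itself in gk->thread, wakes the gatekeeper and goes to sleep in Linux, so that the resume/renice/schedule sequence above runs on its behalf. Here is a heavily simplified sketch of that handshake, reusing the field names from the excerpt; error handling and the actual return path to primary mode are omitted.

/* Simplified sketch of the requestor side of the gatekeeper handshake. */
static int harden_sketch(void)
{
    struct task_struct *this_task = current;
    xnthread_t *thread = xnshadow_thread(this_task);
    struct __gatekeeper *gk = &gatekeeper[task_cpu(this_task)];

    if (!thread)
        return -EPERM;

    if (down_interruptible(&gk->sync))   /* grab the request token */
        return -ERESTARTSYS;

    gk->thread = thread;                 /* publish the migration request */
    set_current_state(TASK_INTERRUPTIBLE);
    wake_up_interruptible_sync(&gk->waitq);
    schedule();                          /* the gatekeeper resumes us on the Xenomai side */

    /* When control comes back here, the task runs in primary mode. */
    return 0;
}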
