--- linux-2.6.11-rc1/fs/proc/array.c	2005-01-14 12:35:55 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/fs/proc/array.c	2005-01-15 11:27:52 +0100
@@ -133,7 +136,8 @@ static const char *task_state_array[] =
 	"T (stopped)",		/*  4 */
 	"T (tracing stop)",	/*  8 */
 	"Z (zombie)",		/* 16 */
-	"X (dead)"		/* 32 */
+	"X (dead)",		/* 32 */
+	"H (on hold)"		/* 64 */
 };
 
 static inline const char * get_task_state(struct task_struct *tsk)
--- linux-2.6.11-rc1/fs/proc/array.c	2005-01-14 12:35:55 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/fs/proc/array.c	2005-01-15 11:27:52 +0100
@@ -142,7 +146,8 @@ static inline const char * get_task_stat
 					    TASK_INTERRUPTIBLE |
 					    TASK_UNINTERRUPTIBLE |
 					    TASK_STOPPED |
-					    TASK_TRACED)) |
+					    TASK_TRACED |
+					    TASK_ONHOLD)) |
 			(tsk->exit_state & (EXIT_ZOMBIE |
 					    EXIT_DEAD));
 	const char **p = &task_state_array[0];
--- linux-2.6.11-rc1/include/linux/sched.h	2005-01-14 12:35:58 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/include/linux/sched.h	2005-01-15 11:27:52 +0100
@@ -110,6 +111,7 @@ extern unsigned long nr_iowait(void);
 #define TASK_TRACED		8
 #define EXIT_ZOMBIE		16
 #define EXIT_DEAD		32
+#define TASK_ONHOLD		64
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
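Two details in the hunks above are easy to miss. get_task_state() resolves the state word positionally, stepping one task_state_array[] slot per state bit, so a new state has to claim the next free power of two (64) and a matching array entry; that is also why "X (dead)" gains a trailing comma. A small userspace model of the lookup (a sketch with constants copied from the hunks, not kernel code):

#include <stdio.h>

#define TASK_ONHOLD 64	/* the new state bit, next free power of two */

static const char *task_state_array[] = {
	"R (running)",		/*  0 */
	"S (sleeping)",		/*  1 */
	"D (disk sleep)",	/*  2 */
	"T (stopped)",		/*  4 */
	"T (tracing stop)",	/*  8 */
	"Z (zombie)",		/* 16 */
	"X (dead)",		/* 32 */
	"H (on hold)"		/* 64 */
};

/* mirrors the loop in get_task_state(): one array slot per state bit */
static const char *state_name(unsigned int state)
{
	const char **p = &task_state_array[0];

	while (state) {
		p++;
		state >>= 1;
	}
	return *p;
}

int main(void)
{
	printf("%s\n", state_name(TASK_ONHOLD));	/* H (on hold) */
	return 0;
}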
--- linux-2.6.11-rc1/include/linux/vserver/sched.h	1970-01-01 01:00:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/include/linux/vserver/sched.h	2005-01-15 11:27:52 +0100
@@ -0,0 +1,25 @@
+#ifndef _VX_SCHED_H
+#define _VX_SCHED_H
+
+#ifdef __KERNEL__
+
+struct timespec;
+
+void vx_vsi_uptime(struct timespec *, struct timespec *);
+
+
+struct vx_info;
+
+void vx_update_load(struct vx_info *);
+
+
+struct task_struct;
+
+int effective_vavavoom(struct task_struct *, int);
+
+int vx_tokens_recalc(struct vx_info *);
+
+#endif	/* __KERNEL__ */
+#else	/* _VX_SCHED_H */
+#warning duplicate inclusion
+#endif	/* _VX_SCHED_H */
--- linux-2.6.11-rc1/include/linux/vserver/signal.h	1970-01-01 01:00:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/include/linux/vserver/signal.h	2005-01-15 11:27:52 +0100
@@ -0,0 +1,27 @@
+#ifndef _VX_SIGNAL_H
+#define _VX_SIGNAL_H
+
+#include "switch.h"
+
+/* context signalling */
+
+#define VCMD_ctx_kill		VC_CMD(PROCTRL, 1, 0)
+#define VCMD_wait_exit		VC_CMD(EVENT, 99, 0)
+
+struct vcmd_ctx_kill_v0 {
+	int32_t pid;
+	int32_t sig;
+};
+
+struct vcmd_wait_exit_v0 {
+	int32_t a;
+	int32_t b;
+};
+
+#ifdef __KERNEL__
+
+extern int vc_ctx_kill(uint32_t, void __user *);
+extern int vc_wait_exit(uint32_t, void __user *);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_SIGNAL_H */
--- linux-2.6.11-rc1/kernel/exit.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/exit.c	2005-01-15 11:27:52 +0100
@@ -226,6 +232,7 @@ void reparent_to_init(void)
 	ptrace_unlink(current);
 	/* Reparent to init */
 	REMOVE_LINKS(current);
+	/* FIXME handle vchild_reaper/initpid */
 	current->parent = child_reaper;
 	current->real_parent = child_reaper;
 	SET_LINKS(current);
--- linux-2.6.11-rc1/kernel/exit.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/exit.c	2005-01-15 11:27:52 +0100
@@ -590,6 +598,7 @@ static inline void forget_original_paren
 	struct task_struct *p, *reaper = father;
 	struct list_head *_p, *_n;
 
+	/* FIXME handle vchild_reaper/initpid */
 	do {
 		reaper = next_thread(reaper);
 		if (reaper == father) {
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -234,6 +237,10 @@ struct runqueue {
 	task_t *migration_thread;
 	struct list_head migration_queue;
 #endif
+#ifdef CONFIG_VSERVER_HARDCPU
+	struct list_head hold_queue;
+	int idle_tokens;
+#endif
 
 #ifdef CONFIG_SCHEDSTATS
 	/* latency stats */
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -621,6 +628,9 @@ static int effective_prio(task_t *p)
 	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
 
 	prio = p->static_prio - bonus;
+	if (task_vx_flags(p, VXF_SCHED_PRIO, 0))
+		prio += effective_vavavoom(p, MAX_USER_PRIO);
+
 	if (prio < MAX_RT_PRIO)
 		prio = MAX_RT_PRIO;
 	if (prio > MAX_PRIO-1)
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -753,19 +763,27 @@ static void activate_task(task_t *p, run
 	}
 	p->timestamp = now;
 
+	vx_activate_task(p);
 	__activate_task(p, rq);
 }
 
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+static void __deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running--;
 	dequeue_task(p, p->array);
 	p->array = NULL;
 }
 
+static inline
+void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+	vx_deactivate_task(p);
+	__deactivate_task(p, rq);
+}
+
 /*
  * resched_task - mark a task 'to be rescheduled now'.
  *
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -1100,6 +1118,9 @@ out_activate:
 	 * to be considered on this CPU.)
 	 */
 	activate_task(p, rq, cpu == this_cpu);
+	/* this is to get the accounting behind the load update */
+	if (old_state == TASK_UNINTERRUPTIBLE)
+		vx_uninterruptible_dec(p);
 	if (!sync || cpu != this_cpu) {
 		if (TASK_PREEMPTS_CURR(p, rq))
 			resched_task(rq->curr);
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -1220,6 +1241,7 @@ void fastcall wake_up_new_task(task_t *
 	p->prio = effective_prio(p);
 
+	vx_activate_task(p);
 	if (likely(cpu == this_cpu)) {
 		if (!(clone_flags & CLONE_VM)) {
 			/*
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -2410,6 +2432,10 @@ void scheduler_tick(void)
 	if (p == rq->idle) {
 		if (wake_priority_sleeper(rq))
 			goto out;
+#ifdef CONFIG_VSERVER_HARDCPU_IDLE
+		if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
+			set_need_resched();
+#endif
 		rebalance_tick(cpu, rq, SCHED_IDLE);
 		return;
 	}
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -2442,7 +2468,7 @@ void scheduler_tick(void)
 		}
 		goto out_unlock;
 	}
-	if (!--p->time_slice) {
+	if (vx_need_resched(p)) {
 		dequeue_task(p, rq->active);
 		set_tsk_need_resched(p);
 		p->prio = effective_prio(p);
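scheduler_tick() now asks vx_need_resched(p) instead of testing !--p->time_slice directly, folding token consumption into the time-slice decision. The helper itself is not part of this excerpt; a plausible shape, assuming the vx_tokens_avail()/vx_consume_token() helpers that the rest of the patch references, might look like this (a sketch, not the patch's definition):

static inline int vx_need_resched(struct task_struct *p)
{
#ifdef CONFIG_VSERVER_HARDCPU
	struct vx_info *vxi = p->vx_info;
#endif
	int slice = --p->time_slice;	/* classic 2.6 behaviour */

#ifdef CONFIG_VSERVER_HARDCPU
	if (vxi) {
		/* every tick of CPU time costs the context one token */
		int tokens = vx_tokens_avail(vxi);

		if (tokens > 0)
			vx_consume_token(vxi);
		/* out of tokens: force a trip through schedule(),
		 * where the task can be moved to the hold queue */
		if (tokens < 2)
			return 1;
	}
#endif
	return (slice == 0);
}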
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -2658,6 +2684,10 @@ asmlinkage void __sched schedule(void)
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
+#ifdef CONFIG_VSERVER_HARDCPU
+	struct vx_info *vxi;
+	int maxidle = -HZ;
+#endif
 	int cpu, idx;
 
 	/*
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -2716,12 +2746,53 @@ need_resched_nonpreemptible:
 		unlikely(signal_pending(prev))))
 			prev->state = TASK_RUNNING;
 		else {
-			if (prev->state == TASK_UNINTERRUPTIBLE)
+			if (prev->state == TASK_UNINTERRUPTIBLE) {
 				rq->nr_uninterruptible++;
+				vx_uninterruptible_inc(prev);
+			}
 			deactivate_task(prev, rq);
 		}
 	}
 
+#ifdef CONFIG_VSERVER_HARDCPU
+	if (!list_empty(&rq->hold_queue)) {
+		struct list_head *l, *n;
+		int ret;
+
+		vxi = NULL;
+		list_for_each_safe(l, n, &rq->hold_queue) {
+			next = list_entry(l, task_t, run_list);
+			if (vxi == next->vx_info)
+				continue;
+
+			vxi = next->vx_info;
+			ret = vx_tokens_recalc(vxi);
+			// tokens = vx_tokens_avail(next);
+
+			if (ret > 0) {
+				list_del(&next->run_list);
+				next->state &= ~TASK_ONHOLD;
+				// one less waiting
+				vx_onhold_dec(vxi);
+				array = rq->expired;
+				next->prio = MAX_PRIO-1;
+				enqueue_task(next, array);
+				rq->nr_running++;
+				if (next->static_prio < rq->best_expired_prio)
+					rq->best_expired_prio = next->static_prio;
+
+				// printk("ˇˇˇ %8lu unhold %p [%d]\n", jiffies, next, next->prio);
+				break;
+			}
+			if ((ret < 0) && (maxidle < ret))
+				maxidle = ret;
+		}
+	}
+	rq->idle_tokens = -maxidle;
+
+pick_next:
+#endif
+
 	cpu = smp_processor_id();
 	if (unlikely(!rq->nr_running)) {
 go_idle:
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -2770,6 +2841,26 @@ go_idle:
 	queue = array->queue + idx;
 	next = list_entry(queue->next, task_t, run_list);
 
+#ifdef CONFIG_VSERVER_HARDCPU
+	vxi = next->vx_info;
+	if (vx_info_flags(vxi, VXF_SCHED_PAUSE|VXF_SCHED_HARD, 0)) {
+		int ret = vx_tokens_recalc(vxi);
+
+		if (unlikely(ret <= 0)) {
+			if (ret && (rq->idle_tokens > -ret))
+				rq->idle_tokens = -ret;
+			__deactivate_task(next, rq);
+			recalc_task_prio(next, now);
+			// a new one on hold
+			vx_onhold_inc(vxi);
+			next->state |= TASK_ONHOLD;
+			list_add_tail(&next->run_list, &rq->hold_queue);
+			//printk("ˇˇˇ %8lu hold %p [%d]\n", jiffies, next, next->prio);
+			goto pick_next;
+		}
+	}
+#endif
+
 	if (!rt_task(next) && next->activated > 0) {
 		unsigned long long delta = now - next->timestamp;
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -4802,6 +4895,9 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->migration_queue);
 #endif
 		atomic_set(&rq->nr_iowait, 0);
+#ifdef CONFIG_VSERVER_HARDCPU
+		INIT_LIST_HEAD(&rq->hold_queue);
+#endif
 
 		for (j = 0; j < 2; j++) {
 			array = rq->arrays + j;
--- linux-2.6.11-rc1/kernel/sched.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/sched.c	2005-01-15 16:26:21 +0100
@@ -4871,6 +4967,7 @@ void normalize_rt_tasks(void)
 		deactivate_task(p, task_rq(p));
 		__setscheduler(p, SCHED_NORMAL, 0);
 		if (array) {
+			vx_activate_task(p);
 			__activate_task(p, task_rq(p));
 			resched_task(rq->curr);
 		}
--- linux-2.6.11-rc1/kernel/signal.c	2005-01-14 12:36:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/signal.c	2005-01-15 11:27:52 +0100
@@ -631,17 +631,26 @@ static int check_kill_permission(int sig
 			       struct task_struct *t)
 {
 	int error = -EINVAL;
+	int user;
+
 	if (sig < 0 || sig > _NSIG)
 		return error;
+
+	user = (!info || ((unsigned long)info != 1 &&
+		(unsigned long)info != 2 && SI_FROMUSER(info)));
+
 	error = -EPERM;
-	if ((!info || ((unsigned long)info != 1 &&
-		(unsigned long)info != 2 && SI_FROMUSER(info)))
-	    && ((sig != SIGCONT) ||
+	if (user && ((sig != SIGCONT) ||
 		(current->signal->session != t->signal->session))
 	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
 	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
 	    && !capable(CAP_KILL))
 		return error;
+
+	error = -ESRCH;
+	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
+		return error;
+
 	return security_task_kill(t, info, sig);
 }
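The reordering in check_kill_permission() is behavioural, not cosmetic: the classic UID/session rules still answer -EPERM, but a sender outside the target's context now gets -ESRCH, as if the pid did not exist, so contexts cannot probe each other's pid space. A toy userspace model of the resulting decision order (the names and the vx_check() semantics are simplified assumptions, not patch code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct task { int xid; };

/* VX_ADMIN|VX_IDENT roughly: the host context (xid 0) may signal
 * anyone, otherwise sender and target must share a context */
static bool vx_check_ident(int sender_xid, int target_xid)
{
	return sender_xid == 0 || sender_xid == target_xid;
}

static int check_kill_permission_model(bool from_user, bool uid_ok,
				       int sender_xid, struct task *t)
{
	if (from_user && !uid_ok)
		return -EPERM;		/* classic UNIX rules first */
	if (from_user && !vx_check_ident(sender_xid, t->xid))
		return -ESRCH;		/* hide the task entirely */
	return 0;
}

int main(void)
{
	struct task t = { .xid = 7 };

	/* same uid, different context: hidden, not refused */
	printf("%d\n", check_kill_permission_model(true, true, 3, &t)); /* -ESRCH */
	/* host context passes the barrier */
	printf("%d\n", check_kill_permission_model(true, true, 0, &t)); /* 0 */
	return 0;
}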
--- linux-2.6.11-rc1/kernel/vserver/sched.c	1970-01-01 01:00:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/vserver/sched.c	2005-01-15 11:27:52 +0100
@@ -0,0 +1,225 @@
+/*
+ *  linux/kernel/vserver/sched.c
+ *
+ *  Virtual Server: Scheduler Support
+ *
+ *  Copyright (C) 2004  Herbert Pötzl
+ *
+ *  V0.01  adapted Sam Vilain's version to 2.6.3
+ *  V0.02  removed legacy interface
+ *
+ */
+
+#include
+#include
+// #include
+#include
+#include
+#include
+
+#include
+#include
+
+
+/*
+ * recalculate the context's scheduling tokens
+ *
+ * ret > 0 : number of tokens available
+ * ret = 0 : context is paused
+ * ret < 0 : number of jiffies until new tokens arrive
+ *
+ */
+int vx_tokens_recalc(struct vx_info *vxi)
+{
+	long delta, tokens = 0;
+
+	if (vx_info_flags(vxi, VXF_SCHED_PAUSE, 0))
+		/* we are paused */
+		return 0;
+
+	delta = jiffies - vxi->sched.jiffies;
+
+	if (delta >= vxi->sched.interval) {
+		/* lockdown scheduler info */
+		spin_lock(&vxi->sched.tokens_lock);
+
+		/* calc integral token part */
+		delta = jiffies - vxi->sched.jiffies;
+		tokens = delta / vxi->sched.interval;
+		delta = tokens * vxi->sched.interval;
+		tokens *= vxi->sched.fill_rate;
+
+		atomic_add(tokens, &vxi->sched.tokens);
+		vxi->sched.jiffies += delta;
+		tokens = atomic_read(&vxi->sched.tokens);
+
+		if (tokens > vxi->sched.tokens_max) {
+			tokens = vxi->sched.tokens_max;
+			atomic_set(&vxi->sched.tokens, tokens);
+		}
+		spin_unlock(&vxi->sched.tokens_lock);
+	} else {
+		/* no new tokens */
+		tokens = vx_tokens_avail(vxi);
+		if (tokens <= 0)
+			vxi->vx_state |= VXS_ONHOLD;
+		if (tokens < vxi->sched.tokens_min) {
+			/* enough tokens will be available in */
+			if (vxi->sched.tokens_min == 0)
+				return delta - vxi->sched.interval;
+			return delta - vxi->sched.interval *
+				vxi->sched.tokens_min / vxi->sched.fill_rate;
+		}
+	}
+
+	/* we have some tokens left */
+	if (vx_info_state(vxi, VXS_ONHOLD) &&
+		(tokens >= vxi->sched.tokens_min))
+		vxi->vx_state &= ~VXS_ONHOLD;
+	if (vx_info_state(vxi, VXS_ONHOLD))
+		tokens -= vxi->sched.tokens_min;
+
+	return tokens;
+}
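vx_tokens_recalc() implements a token bucket: every interval jiffies the context gains fill_rate tokens, capped at tokens_max, and a running task pays one token per timer tick, so fill_rate/interval approximates the context's CPU share. A minimal userspace rendering of the refill arithmetic (locking, atomics and the ONHOLD handling stripped; the field names come from the patch, the example numbers are invented):

#include <stdio.h>

struct sched_info {
	long fill_rate;		/* tokens added per interval         */
	long interval;		/* jiffies between refills           */
	long tokens;		/* current bucket fill               */
	long tokens_min;	/* needed before a held context runs */
	long tokens_max;	/* bucket capacity                   */
	unsigned long jiffies;	/* last refill time                  */
};

/* >0: tokens available; <0: roughly the jiffies until tokens_min
 * is reached (simplified from the kernel version) */
static long tokens_recalc(struct sched_info *s, unsigned long now)
{
	long delta = now - s->jiffies;

	if (delta >= s->interval) {
		long refills = delta / s->interval;	/* integral part only */

		s->tokens += refills * s->fill_rate;
		if (s->tokens > s->tokens_max)
			s->tokens = s->tokens_max;	/* cap the bucket */
		s->jiffies += refills * s->interval;
	}
	if (s->tokens > 0)
		return s->tokens;
	return delta - s->interval * s->tokens_min / s->fill_rate;
}

int main(void)
{
	/* 1 token every 4 jiffies ~= 25% of one CPU */
	struct sched_info s = { 1, 4, 0, 2, 100, 0 };

	printf("%ld\n", tokens_recalc(&s, 8));	/* 2 tokens banked */
	return 0;
}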
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into a -4 ... 0 ... +4 bonus/penalty range.
+ *
+ * Additionally, we scale another amount based on the number of
+ * CPU tokens currently held by the context, if the process is
+ * part of a context (and the appropriate SCHED flag is set).
+ * This ranges from -5 ... 0 ... +15, quadratically.
+ *
+ * So, the total bonus is -9 .. 0 .. +19
+ * We use ~50% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks,
+ *    unless that context is far exceeding its CPU allocation.
+ *
+ * Both properties are important to certain workloads.
+ */
+int effective_vavavoom(task_t *p, int max_prio)
+{
+	struct vx_info *vxi = p->vx_info;
+	int vavavoom, max;
+
+	/* lots of tokens = lots of vavavoom
+	 *      no tokens = no vavavoom      */
+	if ((vavavoom = atomic_read(&vxi->sched.tokens)) >= 0) {
+		max = vxi->sched.tokens_max;
+		vavavoom = max - vavavoom;
+		max = max * max;
+		vavavoom = max_prio * VAVAVOOM_RATIO / 100
+			* (vavavoom*vavavoom - (max >> 2)) / max;
+		/* alternative, geometric mapping
+		vavavoom = -( MAX_USER_PRIO*VAVAVOOM_RATIO/100 * vavavoom
+			/ vxi->sched.tokens_max -
+			MAX_USER_PRIO*VAVAVOOM_RATIO/100/2); */
+	} else
+		vavavoom = 0;
+	/* vavavoom = ( MAX_USER_PRIO*VAVAVOOM_RATIO/100*tokens_left(p) -
+		MAX_USER_PRIO*VAVAVOOM_RATIO/100/2); */
+
+	return vavavoom;
+}
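The quadratic branch of effective_vavavoom() is easiest to see with numbers. The stated -5..0..+15 range implies VAVAVOOM_RATIO = 50, since 40 * 50/100 = 20 and -20/4 .. +3*20/4 is exactly -5 .. +15; that value is inferred from the comment, not visible in this excerpt. A standalone check of the arithmetic:

#include <stdio.h>

#define MAX_USER_PRIO	40
#define VAVAVOOM_RATIO	50	/* inferred, see above */

static int vavavoom(int tokens, int tokens_max)
{
	int v = tokens_max - tokens;		/* tokens already spent */
	int max = tokens_max * tokens_max;

	/* same expression as the patch: quadratic in spent tokens,
	 * centered so a half-full bucket maps to zero */
	return MAX_USER_PRIO * VAVAVOOM_RATIO / 100
		* (v * v - (max >> 2)) / max;
}

int main(void)
{
	printf("%d\n", vavavoom(100, 100));	/* full bucket:  -5 (bonus)    */
	printf("%d\n", vavavoom(50, 100));	/* half bucket:   0            */
	printf("%d\n", vavavoom(0, 100));	/* empty bucket: +15 (penalty) */
	return 0;
}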
+
+int vc_set_sched_v2(uint32_t xid, void __user *data)
+{
+	struct vcmd_set_sched_v2 vc_data;
+	struct vx_info *vxi;
+
+	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	vxi = locate_vx_info(xid);
+	if (!vxi)
+		return -EINVAL;
+
+	spin_lock(&vxi->sched.tokens_lock);
+
+	if (vc_data.interval != SCHED_KEEP)
+		vxi->sched.interval = vc_data.interval;
+	if (vc_data.fill_rate != SCHED_KEEP)
+		vxi->sched.fill_rate = vc_data.fill_rate;
+	if (vc_data.tokens_min != SCHED_KEEP)
+		vxi->sched.tokens_min = vc_data.tokens_min;
+	if (vc_data.tokens_max != SCHED_KEEP)
+		vxi->sched.tokens_max = vc_data.tokens_max;
+	if (vc_data.tokens != SCHED_KEEP)
+		atomic_set(&vxi->sched.tokens, vc_data.tokens);
+
+	/* Sanity check the resultant values */
+	if (vxi->sched.fill_rate <= 0)
+		vxi->sched.fill_rate = 1;
+	if (vxi->sched.interval <= 0)
+		vxi->sched.interval = HZ;
+	if (vxi->sched.tokens_max == 0)
+		vxi->sched.tokens_max = 1;
+	if (atomic_read(&vxi->sched.tokens) > vxi->sched.tokens_max)
+		atomic_set(&vxi->sched.tokens, vxi->sched.tokens_max);
+	if (vxi->sched.tokens_min > vxi->sched.tokens_max)
+		vxi->sched.tokens_min = vxi->sched.tokens_max;
+
+	spin_unlock(&vxi->sched.tokens_lock);
+	put_vx_info(vxi);
+	return 0;
+}
+
+
+int vc_set_sched(uint32_t xid, void __user *data)
+{
+	struct vcmd_set_sched_v3 vc_data;
+	struct vx_info *vxi;
+	unsigned int set_mask;
+
+	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	vxi = locate_vx_info(xid);
+	if (!vxi)
+		return -EINVAL;
+
+	set_mask = vc_data.set_mask;
+
+	spin_lock(&vxi->sched.tokens_lock);
+
+	if (set_mask & VXSM_FILL_RATE)
+		vxi->sched.fill_rate = vc_data.fill_rate;
+	if (set_mask & VXSM_INTERVAL)
+		vxi->sched.interval = vc_data.interval;
+	if (set_mask & VXSM_TOKENS)
+		atomic_set(&vxi->sched.tokens, vc_data.tokens);
+	if (set_mask & VXSM_TOKENS_MIN)
+		vxi->sched.tokens_min = vc_data.tokens_min;
+	if (set_mask & VXSM_TOKENS_MAX)
+		vxi->sched.tokens_max = vc_data.tokens_max;
+	if (set_mask & VXSM_PRIO_BIAS)
+		vxi->sched.priority_bias = vc_data.priority_bias;
+
+	/* Sanity check the resultant values */
+	if (vxi->sched.fill_rate <= 0)
+		vxi->sched.fill_rate = 1;
+	if (vxi->sched.interval <= 0)
+		vxi->sched.interval = HZ;
+	if (vxi->sched.tokens_max == 0)
+		vxi->sched.tokens_max = 1;
+	if (atomic_read(&vxi->sched.tokens) > vxi->sched.tokens_max)
+		atomic_set(&vxi->sched.tokens, vxi->sched.tokens_max);
+	if (vxi->sched.tokens_min > vxi->sched.tokens_max)
+		vxi->sched.tokens_min = vxi->sched.tokens_max;
+	if (vxi->sched.priority_bias > MAX_PRIO_BIAS)
+		vxi->sched.priority_bias = MAX_PRIO_BIAS;
+	if (vxi->sched.priority_bias < MIN_PRIO_BIAS)
+		vxi->sched.priority_bias = MIN_PRIO_BIAS;
+
+	spin_unlock(&vxi->sched.tokens_lock);
+	put_vx_info(vxi);
+	return 0;
+}
+
--- linux-2.6.11-rc1/kernel/vserver/signal.c	1970-01-01 01:00:00 +0100
+++ linux-2.6.11-rc1-vs1.9.4-rc2/kernel/vserver/signal.c	2005-01-15 11:27:52 +0100
@@ -0,0 +1,126 @@
+/*
+ *  linux/kernel/vserver/signal.c
+ *
+ *  Virtual Server: Signal Support
+ *
+ *  Copyright (C) 2003-2004  Herbert Pötzl
+ *
+ *  V0.01  broken out from vcontext V0.05
+ *
+ */
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+
+int vc_ctx_kill(uint32_t id, void __user *data)
+{
+	int retval, count=0;
+	struct vcmd_ctx_kill_v0 vc_data;
+	struct siginfo info;
+	struct task_struct *p;
+	struct vx_info *vxi;
+
+	if (!vx_check(0, VX_ADMIN))
+		return -ENOSYS;
+	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	info.si_signo = vc_data.sig;
+	info.si_errno = 0;
+	info.si_code = SI_USER;
+	info.si_pid = current->pid;
+	info.si_uid = current->uid;
+
+	vxi = locate_vx_info(id);
+	if (!vxi)
+		return -ESRCH;
+
+	retval = -ESRCH;
+	read_lock(&tasklist_lock);
+	switch (vc_data.pid) {
+	case -1:
+	case 0:
+		for_each_process(p) {
+			int err = 0;
+
+			if (vx_task_xid(p) != id || p->pid <= 1 ||
+				(vc_data.pid && vxi->vx_initpid == p->pid) ||
+				!thread_group_leader(p))
+				continue;
+
+			err = send_sig_info(vc_data.sig, &info, p);
+			++count;
+			if (err != -EPERM)
+				retval = err;
+		}
+		break;
+
+	default:
+		p = find_task_by_real_pid(vc_data.pid);
+		if (p) {
+			if (!thread_group_leader(p)) {
+				struct task_struct *tg;
+
+				tg = find_task_by_real_pid(p->tgid);
+				if (tg)
+					p = tg;
+			}
+			if ((id == -1) || (vx_task_xid(p) == id))
+				retval = send_sig_info(vc_data.sig, &info, p);
+		}
+		break;
+	}
+	read_unlock(&tasklist_lock);
+	put_vx_info(vxi);
+	return retval;
+}
+
+
+static int __wait_exit(struct vx_info *vxi)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	int ret = 0;
+
+	add_wait_queue(&vxi->vx_exit, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+wait:
+	if (vx_info_state(vxi, VXS_DEFUNCT))
+		goto out;
+	if (signal_pending(current)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+	schedule();
+	goto wait;
+
+out:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&vxi->vx_exit, &wait);
+	return ret;
+}
+
+
+
+int vc_wait_exit(uint32_t id, void __user *data)
+{
+//	struct vcmd_wait_exit_v0 vc_data;
+	struct vx_info *vxi;
+	int ret;
+
+	vxi = locate_vx_info(id);
+	if (!vxi)
+		return -ESRCH;
+
+	ret = __wait_exit(vxi);
+	put_vx_info(vxi);
+	return ret;
+}
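A note on __wait_exit(): it arms TASK_INTERRUPTIBLE once, before the loop, so after any wakeup the task is TASK_RUNNING again and a spurious wakeup spins through schedule() until VXS_DEFUNCT is set or a signal arrives. That is harmless here because the only non-signal wakeup on vx_exit should imply VXS_DEFUNCT, but the canonical idiom re-arms the state on every pass. A kernel-context sketch of the same wait in canonical form (same names as above, not patch code):

static int wait_exit_canonical(struct vx_info *vxi)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&vxi->vx_exit, &wait);
	for (;;) {
		/* mark sleeping *before* the test: a wakeup that fires
		 * between the test and schedule() then simply makes
		 * schedule() return instead of being lost */
		set_current_state(TASK_INTERRUPTIBLE);
		if (vx_info_state(vxi, VXS_DEFUNCT))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&vxi->vx_exit, &wait);
	return ret;
}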