diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/include/linux/vs_sched.h linux-2.6.36-vs2.3.0.36.37.4/include/linux/vs_sched.h
--- linux-2.6.36-vs2.3.0.36.37.0/include/linux/vs_sched.h	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/include/linux/vs_sched.h	2010-10-24 15:13:57.000000000 +0200
@@ -6,76 +6,9 @@
 #include "vserver/sched.h"
 
 
-#define VAVAVOOM_RATIO		 50
-
 #define MAX_PRIO_BIAS		 20
 #define MIN_PRIO_BIAS		-20
 
-
-#ifdef CONFIG_VSERVER_HARDCPU
-
-/*
- * effective_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into a -4 ... 0 ... +4 bonus/penalty range.
- *
- * Additionally, we scale another amount based on the number of
- * CPU tokens currently held by the context, if the process is
- * part of a context (and the appropriate SCHED flag is set).
- * This ranges from -5 ... 0 ... +15, quadratically.
- *
- * So, the total bonus is -9 .. 0 .. +19
- * We use ~50% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *    unless that context is far exceeding its CPU allocation.
- *
- * Both properties are important to certain workloads.
- */
-static inline
-int vx_effective_vavavoom(struct _vx_sched_pc *sched_pc, int max_prio)
-{
-	int vavavoom, max;
-
-	/* lots of tokens = lots of vavavoom
-	 * no tokens = no vavavoom */
-	if ((vavavoom = sched_pc->tokens) >= 0) {
-		max = sched_pc->tokens_max;
-		vavavoom = max - vavavoom;
-		max = max * max;
-		vavavoom = max_prio * VAVAVOOM_RATIO / 100
-			* (vavavoom*vavavoom - (max >> 2)) / max;
-		return vavavoom;
-	}
-	return 0;
-}
-
-
-static inline
-int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
-{
-	struct vx_info *vxi = p->vx_info;
-	struct _vx_sched_pc *sched_pc;
-
-	if (!vxi)
-		return prio;
-
-	sched_pc = &vx_cpu(vxi, sched_pc);
-	if (vx_info_flags(vxi, VXF_SCHED_PRIO, 0)) {
-		int vavavoom = vx_effective_vavavoom(sched_pc, max_user);
-
-		sched_pc->vavavoom = vavavoom;
-		prio += vavavoom;
-	}
-	prio += sched_pc->prio_bias;
-	return prio;
-}
-
-#else /* !CONFIG_VSERVER_HARDCPU */
-
 static inline
 int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
 {
@@ -86,9 +19,6 @@ int vx_adjust_prio(struct task_struct *p
 	return prio;
 }
 
-#endif /* CONFIG_VSERVER_HARDCPU */
-
-
 static inline void vx_account_user(struct vx_info *vxi,
 	cputime_t cputime, int nice)
 {
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/include/linux/vserver/sched.h linux-2.6.36-vs2.3.0.36.37.4/include/linux/vserver/sched.h
--- linux-2.6.36-vs2.3.0.36.37.0/include/linux/vserver/sched.h	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/include/linux/vserver/sched.h	2010-10-28 18:16:35.000000000 +0200
@@ -14,9 +14,6 @@ struct vx_info;
 
 void vx_update_load(struct vx_info *);
 
-int vx_tokens_recalc(struct _vx_sched_pc *,
-	unsigned long *, unsigned long *, int [2]);
-
 void vx_update_sched_param(struct _vx_sched *sched,
 	struct _vx_sched_pc *sched_pc);
 
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/include/linux/vserver/sched_cmd.h linux-2.6.36-vs2.3.0.36.37.4/include/linux/vserver/sched_cmd.h
--- linux-2.6.36-vs2.3.0.36.37.0/include/linux/vserver/sched_cmd.h	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/include/linux/vserver/sched_cmd.h	2010-10-28 19:02:11.000000000 +0200
@@ -2,107 +2,20 @@
 #define _VX_SCHED_CMD_H
 
-/* sched vserver commands */
-
-#define VCMD_set_sched_v2	VC_CMD(SCHED, 1, 2)
-#define VCMD_set_sched_v3	VC_CMD(SCHED, 1, 3)
-#define VCMD_set_sched_v4	VC_CMD(SCHED, 1, 4)
-
-struct vcmd_set_sched_v2 {
-	int32_t fill_rate;
-	int32_t interval;
-	int32_t tokens;
-	int32_t tokens_min;
-	int32_t tokens_max;
-	uint64_t cpu_mask;
-};
-
-struct vcmd_set_sched_v3 {
-	uint32_t set_mask;
-	int32_t fill_rate;
-	int32_t interval;
-	int32_t tokens;
-	int32_t tokens_min;
-	int32_t tokens_max;
-	int32_t priority_bias;
-};
-
-struct vcmd_set_sched_v4 {
-	uint32_t set_mask;
-	int32_t fill_rate;
-	int32_t interval;
-	int32_t tokens;
-	int32_t tokens_min;
-	int32_t tokens_max;
-	int32_t prio_bias;
+struct vcmd_prio_bias {
 	int32_t cpu_id;
-	int32_t bucket_id;
-};
-
-#define VCMD_set_sched		VC_CMD(SCHED, 1, 5)
-#define VCMD_get_sched		VC_CMD(SCHED, 2, 5)
-
-struct vcmd_sched_v5 {
-	uint32_t mask;
-	int32_t cpu_id;
-	int32_t bucket_id;
-	int32_t fill_rate[2];
-	int32_t interval[2];
-	int32_t tokens;
-	int32_t tokens_min;
-	int32_t tokens_max;
 	int32_t prio_bias;
 };
 
-#define VXSM_FILL_RATE		0x0001
-#define VXSM_INTERVAL		0x0002
-#define VXSM_FILL_RATE2		0x0004
-#define VXSM_INTERVAL2		0x0008
-#define VXSM_TOKENS		0x0010
-#define VXSM_TOKENS_MIN		0x0020
-#define VXSM_TOKENS_MAX		0x0040
-#define VXSM_PRIO_BIAS		0x0100
-
-#define VXSM_IDLE_TIME		0x0200
-#define VXSM_FORCE		0x0400
-
-#define VXSM_V3_MASK		0x0173
-#define VXSM_SET_MASK		0x01FF
-
-#define VXSM_CPU_ID		0x1000
-#define VXSM_BUCKET_ID		0x2000
-
-#define VXSM_MSEC		0x4000
-
-#define SCHED_KEEP		(-2)	/* only for v2 */
+#define VCMD_set_prio_bias	VC_CMD(SCHED, 4, 0)
+#define VCMD_get_prio_bias	VC_CMD(SCHED, 5, 0)
 
 #ifdef __KERNEL__
 
 #include 
 
-extern int vc_set_sched_v2(struct vx_info *, void __user *);
-extern int vc_set_sched_v3(struct vx_info *, void __user *);
-extern int vc_set_sched_v4(struct vx_info *, void __user *);
-extern int vc_set_sched(struct vx_info *, void __user *);
-extern int vc_get_sched(struct vx_info *, void __user *);
-
-#endif /* __KERNEL__ */
-
-#define VCMD_sched_info		VC_CMD(SCHED, 3, 0)
-
-struct vcmd_sched_info {
-	int32_t cpu_id;
-	int32_t bucket_id;
-	uint64_t user_msec;
-	uint64_t sys_msec;
-	uint64_t hold_msec;
-	uint32_t token_usec;
-	int32_t vavavoom;
-};
-
-#ifdef __KERNEL__
-
-extern int vc_sched_info(struct vx_info *, void __user *);
+extern int vc_set_prio_bias(struct vx_info *, void __user *);
+extern int vc_get_prio_bias(struct vx_info *, void __user *);
 
 #endif /* __KERNEL__ */
 #endif /* _VX_SCHED_CMD_H */
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/include/linux/vserver/sched_def.h linux-2.6.36-vs2.3.0.36.37.4/include/linux/vserver/sched_def.h
--- linux-2.6.36-vs2.3.0.36.37.0/include/linux/vserver/sched_def.h	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/include/linux/vserver/sched_def.h	2010-10-28 18:13:49.000000000 +0200
@@ -11,36 +11,13 @@
 
 /* context sub struct */
 
 struct _vx_sched {
-	spinlock_t tokens_lock;		/* lock for token bucket */
-
-	int tokens;			/* number of CPU tokens */
-	int fill_rate[2];		/* Fill rate: add X tokens... */
-	int interval[2];		/* Divisor: per Y jiffies */
-	int tokens_min;			/* Limit: minimum for unhold */
-	int tokens_max;			/* Limit: no more than N tokens */
-
 	int prio_bias;			/* bias offset for priority */
-	unsigned update_mask;		/* which features should be updated */
 	cpumask_t update;		/* CPUs which should update */
 };
 
 struct _vx_sched_pc {
-	int tokens;			/* number of CPU tokens */
-	int flags;			/* bucket flags */
-
-	int fill_rate[2];		/* Fill rate: add X tokens... */
-	int interval[2];		/* Divisor: per Y jiffies */
-	int tokens_min;			/* Limit: minimum for unhold */
-	int tokens_max;			/* Limit: no more than N tokens */
-
 	int prio_bias;			/* bias offset for priority */
-	int vavavoom;			/* last calculated vavavoom */
-
-	unsigned long norm_time;	/* last time accounted */
-	unsigned long idle_time;	/* non linear time for fair sched */
-	unsigned long token_time;	/* token time for accounting */
-	unsigned long onhold;		/* jiffies when put on hold */
 
 	uint64_t user_ticks;	/* token tick events */
 	uint64_t sys_ticks;	/* token tick events */
@@ -48,18 +25,11 @@ struct _vx_sched_pc {
 };
 
-#define VXSF_ONHOLD	0x0001
-#define VXSF_IDLE_TIME	0x0100
-
 #ifdef CONFIG_VSERVER_DEBUG
 
 static inline void __dump_vx_sched(struct _vx_sched *sched)
 {
 	printk("\t_vx_sched:\n");
-	printk("\t tokens: %4d/%4d, %4d/%4d, %4d, %4d\n",
-		sched->fill_rate[0], sched->interval[0],
-		sched->fill_rate[1], sched->interval[1],
-		sched->tokens_min, sched->tokens_max);
 	printk("\t priority = %4d\n", sched->prio_bias);
 }
 
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/sched.c linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/sched.c
--- linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/sched.c	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/sched.c	2010-10-28 19:13:19.000000000 +0200
@@ -3,12 +3,13 @@
  *
  * Virtual Server: Scheduler Support
  *
- * Copyright (C) 2004-2007 Herbert Pötzl
+ * Copyright (C) 2004-2010 Herbert Pötzl
  *
  * V0.01 adapted Sam Vilains version to 2.6.3
  * V0.02 removed legacy interface
  * V0.03 changed vcmds to vxi arg
  * V0.04 removed older and legacy interfaces
+ * V0.05 removed scheduler code/commands
  *
  */
 
@@ -19,393 +20,60 @@
 #include 
 
 
-#define vxd_check_range(val, min, max) do {		\
-	vxlprintk((val < min) || (val > max),		\
-		"check_range(%ld,%ld,%ld)",		\
-		(long)val, (long)min, (long)max,	\
-		__FILE__, __LINE__);			\
-	} while (0)
-
-
 void vx_update_sched_param(struct _vx_sched *sched,
 	struct _vx_sched_pc *sched_pc)
 {
-	unsigned int set_mask = sched->update_mask;
-
-	if (set_mask & VXSM_FILL_RATE)
-		sched_pc->fill_rate[0] = sched->fill_rate[0];
-	if (set_mask & VXSM_INTERVAL)
-		sched_pc->interval[0] = sched->interval[0];
-	if (set_mask & VXSM_FILL_RATE2)
-		sched_pc->fill_rate[1] = sched->fill_rate[1];
-	if (set_mask & VXSM_INTERVAL2)
-		sched_pc->interval[1] = sched->interval[1];
-	if (set_mask & VXSM_TOKENS)
-		sched_pc->tokens = sched->tokens;
-	if (set_mask & VXSM_TOKENS_MIN)
-		sched_pc->tokens_min = sched->tokens_min;
-	if (set_mask & VXSM_TOKENS_MAX)
-		sched_pc->tokens_max = sched->tokens_max;
-	if (set_mask & VXSM_PRIO_BIAS)
-		sched_pc->prio_bias = sched->prio_bias;
-
-	if (set_mask & VXSM_IDLE_TIME)
-		sched_pc->flags |= VXSF_IDLE_TIME;
-	else
-		sched_pc->flags &= ~VXSF_IDLE_TIME;
-
-	/* reset time */
-	sched_pc->norm_time = jiffies;
-}
-
-
-/*
- * recalculate the context's scheduling tokens
- *
- * ret > 0 : number of tokens available
- * ret < 0 : on hold, check delta_min[]
- *	     -1 only jiffies
- *	     -2 also idle time
- *
- */
-int vx_tokens_recalc(struct _vx_sched_pc *sched_pc,
-	unsigned long *norm_time, unsigned long *idle_time, int delta_min[2])
-{
-	long delta;
-	long tokens = 0;
-	int flags = sched_pc->flags;
-
-	/* how much time did pass? */
-	delta = *norm_time - sched_pc->norm_time;
-	// printk("@ %ld, %ld, %ld\n", *norm_time, sched_pc->norm_time, jiffies);
-	vxd_check_range(delta, 0, INT_MAX);
-
-	if (delta >= sched_pc->interval[0]) {
-		long tokens, integral;
-
-		/* calc integral token part */
-		tokens = delta / sched_pc->interval[0];
-		integral = tokens * sched_pc->interval[0];
-		tokens *= sched_pc->fill_rate[0];
-#ifdef CONFIG_VSERVER_HARDCPU
-		delta_min[0] = delta - integral;
-		vxd_check_range(delta_min[0], 0, sched_pc->interval[0]);
-#endif
-		/* advance time */
-		sched_pc->norm_time += delta;
-
-		/* add tokens */
-		sched_pc->tokens += tokens;
-		sched_pc->token_time += tokens;
-	} else
-		delta_min[0] = delta;
-
-#ifdef CONFIG_VSERVER_IDLETIME
-	if (!(flags & VXSF_IDLE_TIME))
-		goto skip_idle;
-
-	/* how much was the idle skip? */
-	delta = *idle_time - sched_pc->idle_time;
-	vxd_check_range(delta, 0, INT_MAX);
-
-	if (delta >= sched_pc->interval[1]) {
-		long tokens, integral;
-
-		/* calc fair share token part */
-		tokens = delta / sched_pc->interval[1];
-		integral = tokens * sched_pc->interval[1];
-		tokens *= sched_pc->fill_rate[1];
-		delta_min[1] = delta - integral;
-		vxd_check_range(delta_min[1], 0, sched_pc->interval[1]);
-
-		/* advance idle time */
-		sched_pc->idle_time += integral;
-
-		/* add tokens */
-		sched_pc->tokens += tokens;
-		sched_pc->token_time += tokens;
-	} else
-		delta_min[1] = delta;
-skip_idle:
-#endif
-
-	/* clip at maximum */
-	if (sched_pc->tokens > sched_pc->tokens_max)
-		sched_pc->tokens = sched_pc->tokens_max;
-	tokens = sched_pc->tokens;
-
-	if ((flags & VXSF_ONHOLD)) {
-		/* can we unhold? */
-		if (tokens >= sched_pc->tokens_min) {
-			flags &= ~VXSF_ONHOLD;
-			sched_pc->hold_ticks +=
-				*norm_time - sched_pc->onhold;
-		} else
-			goto on_hold;
-	} else {
-		/* put on hold? */
-		if (tokens <= 0) {
-			flags |= VXSF_ONHOLD;
-			sched_pc->onhold = *norm_time;
-			goto on_hold;
-		}
-	}
-	sched_pc->flags = flags;
-	return tokens;
-
-on_hold:
-	tokens = sched_pc->tokens_min - tokens;
-	sched_pc->flags = flags;
-	// BUG_ON(tokens < 0); probably doesn't hold anymore
-
-#ifdef CONFIG_VSERVER_HARDCPU
-	/* next interval? */
-	if (!sched_pc->fill_rate[0])
-		delta_min[0] = HZ;
-	else if (tokens > sched_pc->fill_rate[0])
-		delta_min[0] += sched_pc->interval[0] *
-			tokens / sched_pc->fill_rate[0];
-	else
-		delta_min[0] = sched_pc->interval[0] - delta_min[0];
-	vxd_check_range(delta_min[0], 0, INT_MAX);
-
-#ifdef CONFIG_VSERVER_IDLETIME
-	if (!(flags & VXSF_IDLE_TIME))
-		return -1;
-
-	/* next interval? */
-	if (!sched_pc->fill_rate[1])
-		delta_min[1] = HZ;
-	else if (tokens > sched_pc->fill_rate[1])
-		delta_min[1] += sched_pc->interval[1] *
-			tokens / sched_pc->fill_rate[1];
-	else
-		delta_min[1] = sched_pc->interval[1] - delta_min[1];
-	vxd_check_range(delta_min[1], 0, INT_MAX);
-
-	return -2;
-#else
-	return -1;
-#endif /* CONFIG_VSERVER_IDLETIME */
-#else
-	return 0;
-#endif /* CONFIG_VSERVER_HARDCPU */
-}
-
-static inline unsigned long msec_to_ticks(unsigned long msec)
-{
-	return msecs_to_jiffies(msec);
+	sched_pc->prio_bias = sched->prio_bias;
 }
 
-static inline unsigned long ticks_to_msec(unsigned long ticks)
+static int do_set_prio_bias(struct vx_info *vxi, struct vcmd_prio_bias *data)
 {
-	return jiffies_to_msecs(ticks);
-}
-
-static inline unsigned long ticks_to_usec(unsigned long ticks)
-{
-	return jiffies_to_usecs(ticks);
-}
-
-
-static int do_set_sched(struct vx_info *vxi, struct vcmd_sched_v5 *data)
-{
-	unsigned int set_mask = data->mask;
-	unsigned int update_mask;
-	int i, cpu;
-
-	/* Sanity check data values */
-	if (data->tokens_max <= 0)
-		data->tokens_max = HZ;
-	if (data->tokens_min < 0)
-		data->tokens_min = HZ / 3;
-	if (data->tokens_min >= data->tokens_max)
-		data->tokens_min = data->tokens_max;
+	int cpu;
 
 	if (data->prio_bias > MAX_PRIO_BIAS)
 		data->prio_bias = MAX_PRIO_BIAS;
 	if (data->prio_bias < MIN_PRIO_BIAS)
 		data->prio_bias = MIN_PRIO_BIAS;
 
-	spin_lock(&vxi->sched.tokens_lock);
-
-	/* sync up on delayed updates */
-	for_each_cpu_mask(cpu, vxi->sched.update)
-		vx_update_sched_param(&vxi->sched,
-			&vx_per_cpu(vxi, sched_pc, cpu));
-
-	if (set_mask & VXSM_FILL_RATE)
-		vxi->sched.fill_rate[0] = data->fill_rate[0];
-	if (set_mask & VXSM_FILL_RATE2)
-		vxi->sched.fill_rate[1] = data->fill_rate[1];
-	if (set_mask & VXSM_INTERVAL)
-		vxi->sched.interval[0] = (set_mask & VXSM_MSEC) ?
-			msec_to_ticks(data->interval[0]) : data->interval[0];
-	if (set_mask & VXSM_INTERVAL2)
-		vxi->sched.interval[1] = (set_mask & VXSM_MSEC) ?
-			msec_to_ticks(data->interval[1]) : data->interval[1];
-	if (set_mask & VXSM_TOKENS)
-		vxi->sched.tokens = data->tokens;
-	if (set_mask & VXSM_TOKENS_MIN)
-		vxi->sched.tokens_min = data->tokens_min;
-	if (set_mask & VXSM_TOKENS_MAX)
-		vxi->sched.tokens_max = data->tokens_max;
-	if (set_mask & VXSM_PRIO_BIAS)
-		vxi->sched.prio_bias = data->prio_bias;
-
-	/* Sanity check rate/interval */
-	for (i = 0; i < 2; i++) {
-		if (data->fill_rate[i] < 0)
-			data->fill_rate[i] = 0;
-		if (data->interval[i] <= 0)
-			data->interval[i] = HZ;
-	}
-
-	update_mask = vxi->sched.update_mask & VXSM_SET_MASK;
-	update_mask |= (set_mask & (VXSM_SET_MASK | VXSM_IDLE_TIME));
-	vxi->sched.update_mask = update_mask;
-
-#ifdef CONFIG_SMP
-	rmb();
-	if (set_mask & VXSM_CPU_ID) {
+	if (data->cpu_id != ~0) {
 		vxi->sched.update = cpumask_of_cpu(data->cpu_id);
 		cpus_and(vxi->sched.update, cpu_online_map,
 			vxi->sched.update);
 	} else
 		vxi->sched.update = cpu_online_map;
 
-	/* forced reload? */
-	if (set_mask & VXSM_FORCE) {
-		for_each_cpu_mask(cpu, vxi->sched.update)
-			vx_update_sched_param(&vxi->sched,
-				&vx_per_cpu(vxi, sched_pc, cpu));
-		vxi->sched.update = CPU_MASK_NONE;
-	}
-#else
-	/* on UP we update immediately */
-	vx_update_sched_param(&vxi->sched,
-		&vx_per_cpu(vxi, sched_pc, 0));
-#endif
-
-	spin_unlock(&vxi->sched.tokens_lock);
+	for_each_cpu_mask(cpu, vxi->sched.update)
+		vx_update_sched_param(&vxi->sched,
+			&vx_per_cpu(vxi, sched_pc, cpu));
 	return 0;
 }
 
-
-#define COPY_IDS(C) C(cpu_id); C(bucket_id)
-#define COPY_PRI(C) C(prio_bias)
-#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
-#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]); \
-	C(fill_rate[1]); C(interval[1]);
-
-#define COPY_VALUE(name) vc_data.name = data->name
-
-static int do_set_sched_v4(struct vx_info *vxi, struct vcmd_set_sched_v4 *data)
+int vc_set_prio_bias(struct vx_info *vxi, void __user *data)
 {
-	struct vcmd_sched_v5 vc_data;
-
-	vc_data.mask = data->set_mask;
-	COPY_IDS(COPY_VALUE);
-	COPY_PRI(COPY_VALUE);
-	COPY_TOK(COPY_VALUE);
-	vc_data.fill_rate[0] = vc_data.fill_rate[1] = data->fill_rate;
-	vc_data.interval[0] = vc_data.interval[1] = data->interval;
-	return do_set_sched(vxi, &vc_data);
-}
-
-int vc_set_sched_v4(struct vx_info *vxi, void __user *data)
-{
-	struct vcmd_set_sched_v4 vc_data;
+	struct vcmd_prio_bias vc_data;
 
 	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
 		return -EFAULT;
 
-	return do_set_sched_v4(vxi, &vc_data);
+	return do_set_prio_bias(vxi, &vc_data);
 }
 
-	/* latest interface is v5 */
-
-int vc_set_sched(struct vx_info *vxi, void __user *data)
+int vc_get_prio_bias(struct vx_info *vxi, void __user *data)
 {
-	struct vcmd_sched_v5 vc_data;
-
-	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
-		return -EFAULT;
-
-	return do_set_sched(vxi, &vc_data);
-}
-
-
-#define COPY_PRI(C) C(prio_bias)
-#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
-#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]); \
-	C(fill_rate[1]); C(interval[1]);
-
-#define COPY_VALUE(name) vc_data.name = data->name
-
-
-int vc_get_sched(struct vx_info *vxi, void __user *data)
-{
-	struct vcmd_sched_v5 vc_data;
-
-	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
-		return -EFAULT;
-
-	if (vc_data.mask & VXSM_CPU_ID) {
-		int cpu = vc_data.cpu_id;
-		struct _vx_sched_pc *data;
-
-		if (!cpu_possible(cpu))
-			return -EINVAL;
-
-		data = &vx_per_cpu(vxi, sched_pc, cpu);
-		COPY_TOK(COPY_VALUE);
-		COPY_PRI(COPY_VALUE);
-		COPY_FRI(COPY_VALUE);
-
-		if (data->flags & VXSF_IDLE_TIME)
-			vc_data.mask |= VXSM_IDLE_TIME;
-	} else {
-		struct _vx_sched *data = &vxi->sched;
-
-		COPY_TOK(COPY_VALUE);
-		COPY_PRI(COPY_VALUE);
-		COPY_FRI(COPY_VALUE);
-	}
-
-	if (vc_data.mask & VXSM_MSEC) {
-		vc_data.interval[0] = ticks_to_msec(vc_data.interval[0]);
-		vc_data.interval[1] = ticks_to_msec(vc_data.interval[1]);
-	}
-
-	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
-		return -EFAULT;
-	return 0;
-}
-
-
-int vc_sched_info(struct vx_info *vxi, void __user *data)
-{
-	struct vcmd_sched_info vc_data;
+	struct vcmd_prio_bias vc_data;
+	struct _vx_sched_pc *pcd;
 	int cpu;
 
 	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
 		return -EFAULT;
 
 	cpu = vc_data.cpu_id;
+
 	if (!cpu_possible(cpu))
 		return -EINVAL;
 
-	if (vxi) {
-		struct _vx_sched_pc *sched_pc =
-			&vx_per_cpu(vxi, sched_pc, cpu);
-
-		vc_data.user_msec = ticks_to_msec(sched_pc->user_ticks);
-		vc_data.sys_msec = ticks_to_msec(sched_pc->sys_ticks);
-		vc_data.hold_msec = ticks_to_msec(sched_pc->hold_ticks);
-		vc_data.vavavoom = sched_pc->vavavoom;
-	}
-	vc_data.token_usec = ticks_to_usec(1);
+	pcd = &vx_per_cpu(vxi, sched_pc, cpu);
+	vc_data.prio_bias = pcd->prio_bias;
 
 	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
 		return -EFAULT;
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/sched_init.h linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/sched_init.h
--- linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/sched_init.h	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/sched_init.h	2010-10-28 18:17:11.000000000 +0200
@@ -1,37 +1,14 @@
 
 static inline void vx_info_init_sched(struct _vx_sched *sched)
 {
-	static struct lock_class_key tokens_lock_key;
-
 	/* scheduling; hard code starting values as constants */
-	sched->fill_rate[0] = 1;
-	sched->interval[0] = 4;
-	sched->fill_rate[1] = 1;
-	sched->interval[1] = 8;
-	sched->tokens = HZ >> 2;
-	sched->tokens_min = HZ >> 4;
-	sched->tokens_max = HZ >> 1;
-	sched->tokens_lock = SPIN_LOCK_UNLOCKED;
-	sched->prio_bias = 0;
-
-	lockdep_set_class(&sched->tokens_lock, &tokens_lock_key);
+	sched->prio_bias = 0;
 }
 
 static inline
 void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
 {
-	sched_pc->fill_rate[0] = 1;
-	sched_pc->interval[0] = 4;
-	sched_pc->fill_rate[1] = 1;
-	sched_pc->interval[1] = 8;
-	sched_pc->tokens = HZ >> 2;
-	sched_pc->tokens_min = HZ >> 4;
-	sched_pc->tokens_max = HZ >> 1;
-	sched_pc->prio_bias = 0;
-	sched_pc->vavavoom = 0;
-	sched_pc->token_time = 0;
-	sched_pc->idle_time = 0;
-	sched_pc->norm_time = jiffies;
+	sched_pc->prio_bias = 0;
 
 	sched_pc->user_ticks = 0;
 	sched_pc->sys_ticks = 0;
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/sched_proc.h linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/sched_proc.h
--- linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/sched_proc.h	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/sched_proc.h	2010-10-28 18:15:30.000000000 +0200
@@ -8,17 +8,7 @@ int vx_info_proc_sched(struct _vx_sched
 	int length = 0;
 
 	length += sprintf(buffer,
-		"FillRate:\t%8d,%d\n"
-		"Interval:\t%8d,%d\n"
-		"TokensMin:\t%8d\n"
-		"TokensMax:\t%8d\n"
 		"PrioBias:\t%8d\n",
-		sched->fill_rate[0],
-		sched->fill_rate[1],
-		sched->interval[0],
-		sched->interval[1],
-		sched->tokens_min,
-		sched->tokens_max,
 		sched->prio_bias);
 	return length;
 }
@@ -30,27 +20,12 @@ int vx_info_proc_sched_pc(struct _vx_sch
 	int length = 0;
 
 	length += sprintf(buffer + length,
-		"cpu %d: %lld %lld %lld %ld %ld", cpu,
+		"cpu %d: %lld %lld %lld", cpu,
 		(unsigned long long)sched_pc->user_ticks,
 		(unsigned long long)sched_pc->sys_ticks,
-		(unsigned long long)sched_pc->hold_ticks,
-		sched_pc->token_time,
-		sched_pc->idle_time);
+		(unsigned long long)sched_pc->hold_ticks);
 	length += sprintf(buffer + length,
-		" %c%c %d %d %d %d/%d %d/%d",
-		(sched_pc->flags & VXSF_ONHOLD) ? 'H' : 'R',
-		(sched_pc->flags & VXSF_IDLE_TIME) ? 'I' : '-',
-		sched_pc->tokens,
-		sched_pc->tokens_min,
-		sched_pc->tokens_max,
-		sched_pc->fill_rate[0],
-		sched_pc->interval[0],
-		sched_pc->fill_rate[1],
-		sched_pc->interval[1]);
-	length += sprintf(buffer + length,
-		" %d %d\n",
-		sched_pc->prio_bias,
-		sched_pc->vavavoom);
+		" %d\n", sched_pc->prio_bias);
 	return length;
 }
 
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/switch.c linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/switch.c
--- linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/switch.c	2010-10-21 19:05:54.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/switch.c	2010-10-28 19:10:47.000000000 +0200
@@ -172,16 +172,10 @@ long do_vcmd(uint32_t cmd, uint32_t id,
 	case VCMD_get_ncaps:
 		return vc_get_ncaps(nxi, data);
 
-	case VCMD_set_sched_v4:
-		return vc_set_sched_v4(vxi, data);
-	/* this is version 5 */
-	case VCMD_set_sched:
-		return vc_set_sched(vxi, data);
-	case VCMD_get_sched:
-		return vc_get_sched(vxi, data);
-	case VCMD_sched_info:
-		return vc_sched_info(vxi, data);
-
+	case VCMD_set_prio_bias:
+		return vc_set_prio_bias(vxi, data);
+	case VCMD_get_prio_bias:
+		return vc_get_prio_bias(vxi, data);
 	case VCMD_add_dlimit:
 		return __COMPAT(vc_add_dlimit, id, data, compat);
 	case VCMD_rem_dlimit:
@@ -343,8 +337,7 @@ long do_vserver(uint32_t cmd, uint32_t i
 	__VCMD(get_iattr, 2, VCA_NONE, 0);
 	__VCMD(fget_iattr, 2, VCA_NONE, 0);
 	__VCMD(get_dlimit, 3, VCA_NONE, VCF_INFO);
-	__VCMD(get_sched, 3, VCA_VXI, VCF_INFO);
-	__VCMD(sched_info, 3, VCA_VXI, VCF_INFO | VCF_ZIDOK);
+	__VCMD(get_prio_bias, 3, VCA_VXI, VCF_INFO);
 
 	/* lower admin commands */
 	__VCMD(wait_exit, 4, VCA_VXI, VCF_INFO);
@@ -374,8 +367,7 @@ long do_vserver(uint32_t cmd, uint32_t i
 	__VCMD(set_vhi_name, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
 	__VCMD(set_rlimit, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
-	__VCMD(set_sched, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
-	__VCMD(set_sched_v4, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+	__VCMD(set_prio_bias, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
 	__VCMD(set_ncaps, 7, VCA_NXI, VCF_ARES | VCF_SETUP);
 	__VCMD(set_nflags, 7, VCA_NXI, VCF_ARES | VCF_SETUP);
 
diff -NurpP linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/vci_config.h linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/vci_config.h
--- linux-2.6.36-vs2.3.0.36.37.0/kernel/vserver/vci_config.h	2010-10-21 13:09:36.000000000 +0200
+++ linux-2.6.36-vs2.3.0.36.37.4/kernel/vserver/vci_config.h	2010-10-24 15:17:00.000000000 +0200
@@ -36,9 +36,6 @@ static inline uint32_t vci_kernel_config
 #ifdef CONFIG_VSERVER_PROC_SECURE
 	(1 << VCI_KCBIT_PROC_SECURE) |
 #endif
-#ifdef CONFIG_VSERVER_HARDCPU
-	(1 << VCI_KCBIT_HARDCPU) |
-#endif
 #ifdef CONFIG_VSERVER_IDLELIMIT
 	(1 << VCI_KCBIT_IDLELIMIT) |
 #endif