diff -NurpP --minimal linux-2.6.32.17/arch/alpha/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/Kconfig
--- linux-2.6.32.17/arch/alpha/Kconfig	2009-12-03 20:01:49.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -674,6 +674,8 @@ config DUMMY_CONSOLE
 	depends on VGA_HOSE
 	default y
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/alpha/kernel/entry.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/entry.S
--- linux-2.6.32.17/arch/alpha/kernel/entry.S	2009-06-11 17:11:46.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/entry.S	2009-12-03 20:04:56.000000000 +0100
@@ -874,24 +874,15 @@ sys_getxgid:
 	.globl	sys_getxpid
 	.ent	sys_getxpid
 sys_getxpid:
+	lda	$sp, -16($sp)
+	stq	$26, 0($sp)
 	.prologue 0
-	ldq	$2, TI_TASK($8)
 
-	/* See linux/kernel/timer.c sys_getppid for discussion
-	   about this loop.  */
-	ldq	$3, TASK_GROUP_LEADER($2)
-	ldq	$4, TASK_REAL_PARENT($3)
-	ldl	$0, TASK_TGID($2)
-1:	ldl	$1, TASK_TGID($4)
-#ifdef CONFIG_SMP
-	mov	$4, $5
-	mb
-	ldq	$3, TASK_GROUP_LEADER($2)
-	ldq	$4, TASK_REAL_PARENT($3)
-	cmpeq	$4, $5, $5
-	beq	$5, 1b
-#endif
-	stq	$1, 80($sp)
+	lda	$16, 96($sp)
+	jsr	$26, do_getxpid
+	ldq	$26, 0($sp)
+
+	lda	$sp, 16($sp)
 	ret
 .end sys_getxpid
 
diff -NurpP --minimal linux-2.6.32.17/arch/alpha/kernel/osf_sys.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/osf_sys.c
--- linux-2.6.32.17/arch/alpha/kernel/osf_sys.c	2010-08-09 18:04:07.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/osf_sys.c	2010-01-20 04:21:33.000000000 +0100
@@ -865,7 +865,7 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct
 {
 	if (tv) {
 		struct timeval ktv;
-		do_gettimeofday(&ktv);
+		vx_gettimeofday(&ktv);
 		if (put_tv32(tv, &ktv))
 			return -EFAULT;
 	}
diff -NurpP --minimal linux-2.6.32.17/arch/alpha/kernel/ptrace.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/ptrace.c
--- linux-2.6.32.17/arch/alpha/kernel/ptrace.c	2009-09-10 15:25:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/ptrace.c	2009-12-03 20:04:56.000000000 +0100
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/security.h>
 #include <linux/signal.h>
+#include <linux/vs_base.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
diff -NurpP --minimal linux-2.6.32.17/arch/alpha/kernel/systbls.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/systbls.S
--- linux-2.6.32.17/arch/alpha/kernel/systbls.S	2009-03-24 14:18:08.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/systbls.S	2009-12-03 20:04:56.000000000 +0100
@@ -446,7 +446,7 @@ sys_call_table:
 	.quad sys_stat64			/* 425 */
 	.quad sys_lstat64
 	.quad sys_fstat64
-	.quad sys_ni_syscall			/* sys_vserver */
+	.quad sys_vserver			/* sys_vserver */
 	.quad sys_ni_syscall			/* sys_mbind */
 	.quad sys_ni_syscall			/* sys_get_mempolicy */
 	.quad sys_ni_syscall			/* sys_set_mempolicy */
diff -NurpP --minimal linux-2.6.32.17/arch/alpha/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/traps.c
--- linux-2.6.32.17/arch/alpha/kernel/traps.c	2009-06-11 17:11:46.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -183,7 +183,8 @@ die_if_kernel(char * str, struct pt_regs
 #ifdef CONFIG_SMP
 	printk("CPU %d ", hard_smp_processor_id());
 #endif
-	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
+	printk("%s(%d[#%u]): %s %ld\n", current->comm,
+		task_pid_nr(current), current->xid, str, err);
 	dik_show_regs(regs, r9_15);
 	add_taint(TAINT_DIE);
 	dik_show_trace((unsigned long *)(regs+1));
diff -NurpP --minimal linux-2.6.32.17/arch/alpha/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/mm/fault.c
--- linux-2.6.32.17/arch/alpha/mm/fault.c	2009-09-10 15:25:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/alpha/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -193,8 +193,8 @@ do_page_fault(unsigned long address, uns
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk(KERN_ALERT "VM: killing process %s(%d)\n",
-	       current->comm, task_pid_nr(current));
+	printk(KERN_ALERT "VM: killing process %s(%d:#%u)\n",
+	       current->comm, task_pid_nr(current), current->xid);
 	if (!user_mode(regs))
 		goto no_context;
 	do_group_exit(SIGKILL);
diff -NurpP --minimal linux-2.6.32.17/arch/arm/include/asm/tlb.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/include/asm/tlb.h
--- linux-2.6.32.17/arch/arm/include/asm/tlb.h	2009-09-10 15:25:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/include/asm/tlb.h	2010-02-19 16:17:49.000000000 +0100
@@ -27,6 +27,7 @@
 
 #else /* !CONFIG_MMU */
 
+#include <linux/vs_memory.h>
 #include <asm/pgalloc.h>
 
 /*
diff -NurpP --minimal linux-2.6.32.17/arch/arm/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/Kconfig
--- linux-2.6.32.17/arch/arm/Kconfig	2009-12-03 20:01:49.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -1512,6 +1512,8 @@ source "fs/Kconfig"
 
 source "arch/arm/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/arm/kernel/calls.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/kernel/calls.S
--- linux-2.6.32.17/arch/arm/kernel/calls.S	2010-08-09 18:04:07.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/kernel/calls.S	2010-01-20 04:21:33.000000000 +0100
@@ -322,7 +322,7 @@
 /* 310 */	CALL(sys_request_key)
 		CALL(sys_keyctl)
 		CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
-/* vserver */	CALL(sys_ni_syscall)
+		CALL(sys_vserver)
 		CALL(sys_ioprio_set)
 /* 315 */	CALL(sys_ioprio_get)
 		CALL(sys_inotify_init)
diff -NurpP --minimal linux-2.6.32.17/arch/arm/kernel/process.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/kernel/process.c
--- linux-2.6.32.17/arch/arm/kernel/process.c	2009-12-03 20:01:50.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/kernel/process.c	2009-12-03 20:04:56.000000000 +0100
@@ -269,7 +269,8 @@ void __show_regs(struct pt_regs *regs)
 void show_regs(struct pt_regs * regs)
 {
 	printk("\n");
-	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
+	printk("Pid: %d[#%u], comm: %20s\n",
+		task_pid_nr(current), current->xid, current->comm);
 	__show_regs(regs);
 	__backtrace();
 }
diff -NurpP --minimal linux-2.6.32.17/arch/arm/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/kernel/traps.c
--- linux-2.6.32.17/arch/arm/kernel/traps.c	2009-12-03 20:01:50.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/arm/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -234,8 +234,8 @@ static void __die(const char *str, int e
 	sysfs_printk_last_file();
 	print_modules();
 	__show_regs(regs);
-	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
-		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+	printk(KERN_EMERG "Process %.*s (pid: %d:#%u, stack limit = 0x%p)\n",
+		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), tsk->xid, thread + 1);
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
diff -NurpP --minimal linux-2.6.32.17/arch/avr32/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/avr32/mm/fault.c
--- linux-2.6.32.17/arch/avr32/mm/fault.c	2009-09-10 15:25:20.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/avr32/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -216,7 +216,8 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: Killing process %s\n", tsk->comm);
+	printk("VM: Killing process %s(%d:#%u)\n",
+		tsk->comm, task_pid_nr(tsk), tsk->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/cris/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/cris/Kconfig
--- linux-2.6.32.17/arch/cris/Kconfig	2009-06-11 17:11:56.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/cris/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -685,6 +685,8 @@ source "drivers/staging/Kconfig"
 
 source "arch/cris/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/cris/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/cris/mm/fault.c
--- linux-2.6.32.17/arch/cris/mm/fault.c	2009-12-03 20:01:56.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/cris/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -245,7 +245,8 @@ do_page_fault(unsigned long address, str
 
  out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk("VM: killing process %s\n", tsk->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		tsk->comm, task_pid_nr(tsk), tsk->xid);
 	if (user_mode(regs))
 		do_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/frv/kernel/kernel_thread.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/frv/kernel/kernel_thread.S
--- linux-2.6.32.17/arch/frv/kernel/kernel_thread.S	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/frv/kernel/kernel_thread.S	2009-12-03 20:04:56.000000000 +0100
@@ -37,7 +37,7 @@ kernel_thread:
 
 	# start by forking the current process, but with shared VM
 	setlos.p	#__NR_clone,gr7		; syscall number
-	ori		gr10,#CLONE_VM,gr8	; first syscall arg	[clone_flags]
+	ori		gr10,#CLONE_KT,gr8	; first syscall arg	[clone_flags]
 	sethi.p		#0xe4e4,gr9		; second syscall arg	[newsp]
 	setlo		#0xe4e4,gr9
 	setlos.p	#0,gr10			; third syscall arg	[parent_tidptr]
diff -NurpP --minimal linux-2.6.32.17/arch/frv/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/frv/mm/fault.c
--- linux-2.6.32.17/arch/frv/mm/fault.c	2009-09-10 15:25:22.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/frv/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -257,7 +257,8 @@ asmlinkage void do_page_fault(int datamm
  */
  out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk("VM: killing process %s\n", current->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		current->comm, task_pid_nr(current), current->xid);
 	if (user_mode(__frame))
 		do_group_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/h8300/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/h8300/Kconfig
--- linux-2.6.32.17/arch/h8300/Kconfig	2009-03-24 14:18:24.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/h8300/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -226,6 +226,8 @@ source "fs/Kconfig"
 
 source "arch/h8300/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/ia32/ia32_entry.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/ia32/ia32_entry.S
--- linux-2.6.32.17/arch/ia64/ia32/ia32_entry.S	2009-06-11 17:11:57.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/ia32/ia32_entry.S	2009-12-03 20:04:56.000000000 +0100
@@ -451,7 +451,7 @@ ia32_syscall_table:
  	data8 sys_tgkill	/* 270 */
  	data8 compat_sys_utimes
  	data8 sys32_fadvise64_64
- 	data8 sys_ni_syscall
+	data8 sys32_vserver
   	data8 sys_ni_syscall
  	data8 sys_ni_syscall	/* 275 */
   	data8 sys_ni_syscall
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/include/asm/tlb.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/include/asm/tlb.h
--- linux-2.6.32.17/arch/ia64/include/asm/tlb.h	2009-09-10 15:25:22.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/include/asm/tlb.h	2009-12-31 14:37:34.000000000 +0100
@@ -40,6 +40,7 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+#include <linux/vs_memory.h>
 
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/Kconfig
--- linux-2.6.32.17/arch/ia64/Kconfig	2009-12-03 20:01:56.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -685,6 +685,8 @@ source "fs/Kconfig"
 
 source "arch/ia64/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/kernel/entry.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/entry.S
--- linux-2.6.32.17/arch/ia64/kernel/entry.S	2009-09-10 15:25:22.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/entry.S	2009-12-03 20:04:56.000000000 +0100
@@ -1753,7 +1753,7 @@ sys_call_table:
 	data8 sys_mq_notify
 	data8 sys_mq_getsetattr
 	data8 sys_kexec_load
-	data8 sys_ni_syscall			// reserved for vserver
+	data8 sys_vserver
 	data8 sys_waitid			// 1270
 	data8 sys_add_key
 	data8 sys_request_key
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/kernel/perfmon.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/perfmon.c
--- linux-2.6.32.17/arch/ia64/kernel/perfmon.c	2009-09-10 15:25:22.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/perfmon.c	2009-12-03 20:04:56.000000000 +0100
@@ -41,6 +41,7 @@
 #include <linux/rcupdate.h>
 #include <linux/completion.h>
 #include <linux/tracehook.h>
+#include <linux/vs_memory.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -2372,7 +2373,7 @@ pfm_smpl_buffer_alloc(struct task_struct
 	 */
 	insert_vm_struct(mm, vma);
 
-	mm->total_vm  += size >> PAGE_SHIFT;
+	vx_vmpages_add(mm, size >> PAGE_SHIFT);
 	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
 							vma_pages(vma));
 	up_write(&task->mm->mmap_sem);
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/kernel/process.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/process.c
--- linux-2.6.32.17/arch/ia64/kernel/process.c	2009-12-03 20:01:56.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/process.c	2009-12-03 20:04:56.000000000 +0100
@@ -110,8 +110,8 @@ show_regs (struct pt_regs *regs)
 	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
 
 	print_modules();
-	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
-			smp_processor_id(), current->comm);
+	printk("\nPid: %d[#%u], CPU %d, comm: %20s\n", task_pid_nr(current),
+			current->xid, smp_processor_id(), current->comm);
 	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s (%s)\n",
 	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
 	       init_utsname()->release);
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/kernel/ptrace.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/ptrace.c
--- linux-2.6.32.17/arch/ia64/kernel/ptrace.c	2009-09-10 15:25:22.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/ptrace.c	2009-12-03 20:04:56.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/regset.h>
 #include <linux/elf.h>
 #include <linux/tracehook.h>
+#include <linux/vs_base.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/traps.c
--- linux-2.6.32.17/arch/ia64/kernel/traps.c	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -60,8 +60,9 @@ die (const char *str, struct pt_regs *re
 	put_cpu();
 
 	if (++die.lock_owner_depth < 3) {
-		printk("%s[%d]: %s %ld [%d]\n",
-		current->comm, task_pid_nr(current), str, err, ++die_counter);
+		printk("%s[%d[#%u]]: %s %ld [%d]\n",
+			current->comm, task_pid_nr(current), current->xid,
+			str, err, ++die_counter);
 		if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
 	            != NOTIFY_STOP)
 			show_regs(regs);
@@ -324,8 +325,9 @@ handle_fpu_swa (int fp_fault, struct pt_
 			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
 				last.time = current_jiffies + 5 * HZ;
 				printk(KERN_WARNING
-		       			"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
-		       			current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
+					"%s(%d[#%u]): floating-point assist fault at ip %016lx, isr %016lx\n",
+					current->comm, task_pid_nr(current), current->xid,
+					regs->cr_iip + ia64_psr(regs)->ri, isr);
 			}
 		}
 	}
diff -NurpP --minimal linux-2.6.32.17/arch/ia64/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/mm/fault.c
--- linux-2.6.32.17/arch/ia64/mm/fault.c	2009-09-10 15:25:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/ia64/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/vs_memory.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -281,7 +282,8 @@ ia64_do_page_fault (unsigned long addres
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
+	printk(KERN_CRIT "VM: killing process %s(%d:#%u)\n",
+		current->comm, task_pid_nr(current), current->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/m32r/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/m32r/kernel/traps.c
--- linux-2.6.32.17/arch/m32r/kernel/traps.c	2009-12-03 20:01:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m32r/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -196,8 +196,9 @@ static void show_registers(struct pt_reg
 	} else {
 		printk("SPI: %08lx\n", sp);
 	}
-	printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
-		current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
+	printk("Process %s (pid: %d[#%u], process nr: %d, stackpage=%08lx)",
+		current->comm, task_pid_nr(current), current->xid,
+		0xffff & i, 4096+(unsigned long)current);
 
 	/*
 	 * When in-kernel, we also print out the stack and code at the
diff -NurpP --minimal linux-2.6.32.17/arch/m32r/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/m32r/mm/fault.c
--- linux-2.6.32.17/arch/m32r/mm/fault.c	2009-09-10 15:25:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m32r/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -276,7 +276,8 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", tsk->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		tsk->comm, task_pid_nr(tsk), tsk->xid);
 	if (error_code & ACE_USERMODE)
 		do_group_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/m68k/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/Kconfig
--- linux-2.6.32.17/arch/m68k/Kconfig	2009-12-03 20:01:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -622,6 +622,8 @@ source "fs/Kconfig"
 
 source "arch/m68k/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/m68k/kernel/ptrace.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/kernel/ptrace.c
--- linux-2.6.32.17/arch/m68k/kernel/ptrace.c	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/kernel/ptrace.c	2009-12-03 20:04:56.000000000 +0100
@@ -18,6 +18,7 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/signal.h>
+#include <linux/vs_base.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -269,6 +270,8 @@ long arch_ptrace(struct task_struct *chi
 		ret = ptrace_request(child, request, addr, data);
 		break;
 	}
+	if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+		goto out_tsk;
 
 	return ret;
 out_eio:
diff -NurpP --minimal linux-2.6.32.17/arch/m68k/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/kernel/traps.c
--- linux-2.6.32.17/arch/m68k/kernel/traps.c	2009-09-10 15:25:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -906,8 +906,8 @@ void show_registers(struct pt_regs *regs
 	printk("d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
 	       regs->d4, regs->d5, regs->a0, regs->a1);
 
-	printk("Process %s (pid: %d, task=%p)\n",
-		current->comm, task_pid_nr(current), current);
+	printk("Process %s (pid: %d[#%u], task=%p)\n",
+		current->comm, task_pid_nr(current), current->xid, current);
 	addr = (unsigned long)&fp->un;
 	printk("Frame format=%X ", regs->format);
 	switch (regs->format) {
diff -NurpP --minimal linux-2.6.32.17/arch/m68k/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/mm/fault.c
--- linux-2.6.32.17/arch/m68k/mm/fault.c	2009-09-10 15:25:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68k/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -186,7 +186,8 @@ out_of_memory:
 		goto survive;
 	}
 
-	printk("VM: killing process %s\n", current->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		current->comm, task_pid_nr(current), current->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 
diff -NurpP --minimal linux-2.6.32.17/arch/m68knommu/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68knommu/Kconfig
--- linux-2.6.32.17/arch/m68knommu/Kconfig	2009-12-03 20:01:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68knommu/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -727,6 +727,8 @@ source "fs/Kconfig"
 
 source "arch/m68knommu/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/m68knommu/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68knommu/kernel/traps.c
--- linux-2.6.32.17/arch/m68knommu/kernel/traps.c	2009-09-10 15:25:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/m68knommu/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -78,8 +78,9 @@ void die_if_kernel(char *str, struct pt_
 	printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
 	       fp->d4, fp->d5, fp->a0, fp->a1);
 
-	printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
-		current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
+	printk(KERN_EMERG "Process %s (pid: %d[#%u], stackpage=%08lx)\n",
+		current->comm, task_pid_nr(current), current->xid,
+		PAGE_SIZE+(unsigned long)current);
 	show_stack(NULL, (unsigned long *)(fp + 1));
 	add_taint(TAINT_DIE);
 	do_exit(SIGSEGV);
diff -NurpP --minimal linux-2.6.32.17/arch/microblaze/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/microblaze/mm/fault.c
--- linux-2.6.32.17/arch/microblaze/mm/fault.c	2009-09-10 15:25:24.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/microblaze/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -279,7 +279,8 @@ out_of_memory:
 		goto survive;
 	}
 	up_read(&mm->mmap_sem);
-	printk(KERN_WARNING "VM: killing process %s\n", current->comm);
+	printk(KERN_WARNING "VM: killing process %s(%d:#%u)\n",
+		current->comm, task_pid_nr(current), current->xid);
 	if (user_mode(regs))
 		do_exit(SIGKILL);
 	bad_page_fault(regs, address, SIGKILL);
diff -NurpP --minimal linux-2.6.32.17/arch/mips/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/Kconfig
--- linux-2.6.32.17/arch/mips/Kconfig	2009-12-03 20:01:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -2188,6 +2188,8 @@ source "fs/Kconfig"
 
 source "arch/mips/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/mips/kernel/ptrace.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/ptrace.c
--- linux-2.6.32.17/arch/mips/kernel/ptrace.c	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/ptrace.c	2009-12-03 20:04:56.000000000 +0100
@@ -25,6 +25,7 @@
 #include <linux/security.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
+#include <linux/vs_base.h>
 
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
@@ -259,6 +260,9 @@ long arch_ptrace(struct task_struct *chi
 {
 	int ret;
 
+	if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+		goto out;
+
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
diff -NurpP --minimal linux-2.6.32.17/arch/mips/kernel/scall32-o32.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall32-o32.S
--- linux-2.6.32.17/arch/mips/kernel/scall32-o32.S	2009-12-03 20:01:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall32-o32.S	2009-12-03 20:04:56.000000000 +0100
@@ -525,7 +525,7 @@ einval:	li	v0, -ENOSYS
 	sys	sys_mq_timedreceive	5
 	sys	sys_mq_notify		2	/* 4275 */
 	sys	sys_mq_getsetattr	3
-	sys	sys_ni_syscall		0	/* sys_vserver */
+	sys	sys_vserver		3
 	sys	sys_waitid		5
 	sys	sys_ni_syscall		0	/* available, was setaltroot */
 	sys	sys_add_key		5	/* 4280 */
diff -NurpP --minimal linux-2.6.32.17/arch/mips/kernel/scall64-64.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall64-64.S
--- linux-2.6.32.17/arch/mips/kernel/scall64-64.S	2009-12-03 20:01:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall64-64.S	2009-12-03 20:04:56.000000000 +0100
@@ -362,7 +362,7 @@ sys_call_table:
 	PTR	sys_mq_timedreceive
 	PTR	sys_mq_notify
 	PTR	sys_mq_getsetattr		/* 5235 */
-	PTR	sys_ni_syscall			/* sys_vserver */
+	PTR	sys_vserver
 	PTR	sys_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key
diff -NurpP --minimal linux-2.6.32.17/arch/mips/kernel/scall64-n32.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall64-n32.S
--- linux-2.6.32.17/arch/mips/kernel/scall64-n32.S	2009-12-03 20:01:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall64-n32.S	2009-12-03 20:04:56.000000000 +0100
@@ -360,7 +360,7 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_mq_timedreceive
 	PTR	compat_sys_mq_notify
 	PTR	compat_sys_mq_getsetattr
-	PTR	sys_ni_syscall			/* 6240, sys_vserver */
+	PTR	sys32_vserver			/* 6240 */
 	PTR	compat_sys_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key
diff -NurpP --minimal linux-2.6.32.17/arch/mips/kernel/scall64-o32.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall64-o32.S
--- linux-2.6.32.17/arch/mips/kernel/scall64-o32.S	2009-12-03 20:01:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/scall64-o32.S	2009-12-03 20:04:56.000000000 +0100
@@ -480,7 +480,7 @@ sys_call_table:
 	PTR	compat_sys_mq_timedreceive
 	PTR	compat_sys_mq_notify		/* 4275 */
 	PTR	compat_sys_mq_getsetattr
-	PTR	sys_ni_syscall			/* sys_vserver */
+	PTR	sys32_vserver
 	PTR	sys_32_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key			/* 4280 */
diff -NurpP --minimal linux-2.6.32.17/arch/mips/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/traps.c
--- linux-2.6.32.17/arch/mips/kernel/traps.c	2009-12-03 20:01:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mips/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -335,9 +335,10 @@ void show_registers(const struct pt_regs
 
 	__show_regs(regs);
 	print_modules();
-	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
-	       current->comm, current->pid, current_thread_info(), current,
-	      field, current_thread_info()->tp_value);
+	printk("Process %s (pid: %d:#%u, threadinfo=%p, task=%p, tls=%0*lx)\n",
+		current->comm, task_pid_nr(current), current->xid,
+		current_thread_info(), current,
+		field, current_thread_info()->tp_value);
 	if (cpu_has_userlocal) {
 		unsigned long tls;
 
diff -NurpP --minimal linux-2.6.32.17/arch/mn10300/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/mn10300/mm/fault.c
--- linux-2.6.32.17/arch/mn10300/mm/fault.c	2009-09-10 15:25:39.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/mn10300/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -339,7 +339,8 @@ no_context:
 out_of_memory:
 	up_read(&mm->mmap_sem);
 	monitor_signal(regs);
-	printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+	printk(KERN_ALERT "VM: killing process %s(%d:#%u)\n",
+		tsk->comm, task_pid_nr(tsk), tsk->xid);
 	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
 		do_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/parisc/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/Kconfig
--- linux-2.6.32.17/arch/parisc/Kconfig	2009-12-03 20:02:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -294,6 +294,8 @@ source "fs/Kconfig"
 
 source "arch/parisc/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/parisc/kernel/syscall_table.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/kernel/syscall_table.S
--- linux-2.6.32.17/arch/parisc/kernel/syscall_table.S	2009-12-03 20:02:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/kernel/syscall_table.S	2009-12-03 20:04:56.000000000 +0100
@@ -361,7 +361,7 @@
 	ENTRY_COMP(mbind)		/* 260 */
 	ENTRY_COMP(get_mempolicy)
 	ENTRY_COMP(set_mempolicy)
-	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
+	ENTRY_DIFF(vserver)
 	ENTRY_SAME(add_key)
 	ENTRY_SAME(request_key)		/* 265 */
 	ENTRY_SAME(keyctl)
diff -NurpP --minimal linux-2.6.32.17/arch/parisc/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/kernel/traps.c
--- linux-2.6.32.17/arch/parisc/kernel/traps.c	2009-09-10 15:25:40.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -236,8 +236,9 @@ void die_if_kernel(char *str, struct pt_
 		if (err == 0)
 			return; /* STFU */
 
-		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
-			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
+		printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld) at " RFMT "\n",
+			current->comm, task_pid_nr(current), current->xid,
+			str, err, regs->iaoq[0]);
 #ifdef PRINT_USER_FAULTS
 		/* XXX for debugging only */
 		show_regs(regs);
@@ -270,8 +271,8 @@ void die_if_kernel(char *str, struct pt_
 		pdc_console_restart();
 	
 	if (err)
-		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
-			current->comm, task_pid_nr(current), str, err);
+		printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld)\n",
+			current->comm, task_pid_nr(current), current->xid, str, err);
 
 	/* Wot's wrong wif bein' racy? */
 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
diff -NurpP --minimal linux-2.6.32.17/arch/parisc/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/mm/fault.c
--- linux-2.6.32.17/arch/parisc/mm/fault.c	2009-09-10 15:25:40.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/parisc/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -237,8 +237,9 @@ bad_area:
 
 #ifdef PRINT_USER_FAULTS
 		printk(KERN_DEBUG "\n");
-		printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
-		    task_pid_nr(tsk), tsk->comm, code, address);
+		printk(KERN_DEBUG "do_page_fault() pid=%d:#%u "
+		    "command='%s' type=%lu address=0x%08lx\n",
+		    task_pid_nr(tsk), tsk->xid, tsk->comm, code, address);
 		if (vma) {
 			printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
 					vma->vm_start, vma->vm_end);
@@ -264,7 +265,8 @@ no_context:
 
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
+	printk(KERN_CRIT "VM: killing process %s(%d:#%u)\n",
+		current->comm, current->pid, current->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/powerpc/include/asm/unistd.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/include/asm/unistd.h
--- linux-2.6.32.17/arch/powerpc/include/asm/unistd.h	2009-12-03 20:02:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/include/asm/unistd.h	2009-12-03 20:04:56.000000000 +0100
@@ -275,7 +275,7 @@
 #endif
 #define __NR_rtas		255
 #define __NR_sys_debug_setcontext 256
-/* Number 257 is reserved for vserver */
+#define __NR_vserver		257
 #define __NR_migrate_pages	258
 #define __NR_mbind		259
 #define __NR_get_mempolicy	260
diff -NurpP --minimal linux-2.6.32.17/arch/powerpc/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/Kconfig
--- linux-2.6.32.17/arch/powerpc/Kconfig	2009-12-03 20:02:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -943,6 +943,8 @@ source "lib/Kconfig"
 
 source "arch/powerpc/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 config KEYS_COMPAT
diff -NurpP --minimal linux-2.6.32.17/arch/powerpc/kernel/process.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/kernel/process.c
--- linux-2.6.32.17/arch/powerpc/kernel/process.c	2010-08-09 18:04:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/kernel/process.c	2010-02-12 10:59:55.000000000 +0100
@@ -519,8 +519,9 @@ void show_regs(struct pt_regs * regs)
 #else
 		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
 #endif
-	printk("TASK = %p[%d] '%s' THREAD: %p",
-	       current, task_pid_nr(current), current->comm, task_thread_info(current));
+	printk("TASK = %p[%d,#%u] '%s' THREAD: %p",
+	       current, task_pid_nr(current), current->xid,
+	       current->comm, task_thread_info(current));
 
 #ifdef CONFIG_SMP
 	printk(" CPU: %d", raw_smp_processor_id());
diff -NurpP --minimal linux-2.6.32.17/arch/powerpc/kernel/traps.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/kernel/traps.c
--- linux-2.6.32.17/arch/powerpc/kernel/traps.c	2009-09-10 15:25:41.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/kernel/traps.c	2009-12-03 20:04:56.000000000 +0100
@@ -931,8 +931,9 @@ void nonrecoverable_exception(struct pt_
 
 void trace_syscall(struct pt_regs *regs)
 {
-	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
-	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
+	printk("Task: %p(%d[#%u]), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
+	       current, task_pid_nr(current), current->xid,
+	       regs->nip, regs->link, regs->gpr[0],
 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
 }
 
diff -NurpP --minimal linux-2.6.32.17/arch/powerpc/kernel/vdso.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/kernel/vdso.c
--- linux-2.6.32.17/arch/powerpc/kernel/vdso.c	2009-12-03 20:02:02.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/kernel/vdso.c	2009-12-03 20:04:56.000000000 +0100
@@ -23,6 +23,7 @@
 #include <linux/security.h>
 #include <linux/bootmem.h>
 #include <linux/lmb.h>
+#include <linux/vs_memory.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
diff -NurpP --minimal linux-2.6.32.17/arch/powerpc/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/mm/fault.c
--- linux-2.6.32.17/arch/powerpc/mm/fault.c	2009-12-03 20:02:02.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/powerpc/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -358,7 +358,8 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", current->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		current->comm, current->pid, current->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	return SIGKILL;
diff -NurpP --minimal linux-2.6.32.17/arch/s390/include/asm/tlb.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/include/asm/tlb.h
--- linux-2.6.32.17/arch/s390/include/asm/tlb.h	2009-09-10 15:25:43.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/include/asm/tlb.h	2009-12-31 14:37:21.000000000 +0100
@@ -23,6 +23,8 @@
 
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/vs_memory.h>
+
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
 #include <asm/smp.h>
diff -NurpP --minimal linux-2.6.32.17/arch/s390/include/asm/unistd.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/include/asm/unistd.h
--- linux-2.6.32.17/arch/s390/include/asm/unistd.h	2009-12-03 20:02:03.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/include/asm/unistd.h	2009-12-30 00:58:47.000000000 +0100
@@ -202,7 +202,7 @@
 #define __NR_clock_gettime	(__NR_timer_create+6)
 #define __NR_clock_getres	(__NR_timer_create+7)
 #define __NR_clock_nanosleep	(__NR_timer_create+8)
-/* Number 263 is reserved for vserver */
+#define __NR_vserver		263
 #define __NR_statfs64		265
 #define __NR_fstatfs64		266
 #define __NR_remap_file_pages	267
diff -NurpP --minimal linux-2.6.32.17/arch/s390/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/Kconfig
--- linux-2.6.32.17/arch/s390/Kconfig	2009-12-03 20:02:03.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -616,6 +616,8 @@ source "fs/Kconfig"
 
 source "arch/s390/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/s390/kernel/ptrace.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/kernel/ptrace.c
--- linux-2.6.32.17/arch/s390/kernel/ptrace.c	2010-08-09 18:04:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/kernel/ptrace.c	2010-05-27 19:21:36.000000000 +0200
@@ -36,6 +36,7 @@
 #include <linux/regset.h>
 #include <linux/tracehook.h>
 #include <linux/seccomp.h>
+#include <linux/vs_base.h>
 #include <trace/syscall.h>
 #include <asm/compat.h>
 #include <asm/segment.h>
diff -NurpP --minimal linux-2.6.32.17/arch/s390/kernel/syscalls.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/kernel/syscalls.S
--- linux-2.6.32.17/arch/s390/kernel/syscalls.S	2009-12-03 20:02:03.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/kernel/syscalls.S	2009-12-03 20:04:56.000000000 +0100
@@ -271,7 +271,7 @@ SYSCALL(sys_clock_settime,sys_clock_sett
 SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper)	/* 260 */
 SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
 SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
-NI_SYSCALL							/* reserved for vserver */
+SYSCALL(sys_vserver,sys_vserver,sys32_vserver)
 SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
 SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
 SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
diff -NurpP --minimal linux-2.6.32.17/arch/s390/lib/uaccess_pt.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/lib/uaccess_pt.c
--- linux-2.6.32.17/arch/s390/lib/uaccess_pt.c	2009-09-10 15:25:43.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/s390/lib/uaccess_pt.c	2009-12-03 20:04:56.000000000 +0100
@@ -90,7 +90,8 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", current->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		current->comm, task_pid_nr(current), current->xid);
 	return ret;
 
 out_sigbus:
diff -NurpP --minimal linux-2.6.32.17/arch/sh/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/Kconfig
--- linux-2.6.32.17/arch/sh/Kconfig	2009-12-03 20:02:03.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -853,6 +853,8 @@ source "fs/Kconfig"
 
 source "arch/sh/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/sh/kernel/irq.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/kernel/irq.c
--- linux-2.6.32.17/arch/sh/kernel/irq.c	2009-12-03 20:02:10.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/kernel/irq.c	2009-12-03 20:04:56.000000000 +0100
@@ -12,6 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
+// #include <linux/vs_context.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
diff -NurpP --minimal linux-2.6.32.17/arch/sh/kernel/vsyscall/vsyscall.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/kernel/vsyscall/vsyscall.c
--- linux-2.6.32.17/arch/sh/kernel/vsyscall/vsyscall.c	2009-03-24 14:18:42.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/kernel/vsyscall/vsyscall.c	2009-12-03 20:04:56.000000000 +0100
@@ -19,6 +19,7 @@
 #include <linux/elf.h>
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/vs_memory.h>
 
 /*
  * Should the kernel map a VDSO page into processes and pass its
diff -NurpP --minimal linux-2.6.32.17/arch/sh/mm/fault_32.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/mm/fault_32.c
--- linux-2.6.32.17/arch/sh/mm/fault_32.c	2009-12-03 20:02:14.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/mm/fault_32.c	2009-12-03 20:04:56.000000000 +0100
@@ -292,7 +292,8 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", tsk->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		tsk->comm, task_pid_nr(tsk), tsk->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/sh/mm/tlbflush_64.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/mm/tlbflush_64.c
--- linux-2.6.32.17/arch/sh/mm/tlbflush_64.c	2009-12-03 20:02:14.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sh/mm/tlbflush_64.c	2009-12-03 20:04:56.000000000 +0100
@@ -306,7 +306,8 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", tsk->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		tsk->comm, task_pid_nr(tsk), tsk->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	goto no_context;
diff -NurpP --minimal linux-2.6.32.17/arch/sparc/include/asm/tlb_64.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/include/asm/tlb_64.h
--- linux-2.6.32.17/arch/sparc/include/asm/tlb_64.h	2009-09-10 15:25:45.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/include/asm/tlb_64.h	2009-12-03 20:04:56.000000000 +0100
@@ -3,6 +3,7 @@
 
 #include <linux/swap.h>
 #include <linux/pagemap.h>
+#include <linux/vs_memory.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
diff -NurpP --minimal linux-2.6.32.17/arch/sparc/include/asm/unistd.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/include/asm/unistd.h
--- linux-2.6.32.17/arch/sparc/include/asm/unistd.h	2009-12-03 20:02:15.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/include/asm/unistd.h	2009-12-03 20:04:56.000000000 +0100
@@ -335,7 +335,7 @@
 #define __NR_timer_getoverrun	264
 #define __NR_timer_delete	265
 #define __NR_timer_create	266
-/* #define __NR_vserver		267 Reserved for VSERVER */
+#define __NR_vserver		267
 #define __NR_io_setup		268
 #define __NR_io_destroy		269
 #define __NR_io_submit		270
diff -NurpP --minimal linux-2.6.32.17/arch/sparc/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/Kconfig
--- linux-2.6.32.17/arch/sparc/Kconfig	2009-12-03 20:02:14.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -550,6 +550,8 @@ source "fs/Kconfig"
 
 source "arch/sparc/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/sparc/kernel/systbls_32.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/kernel/systbls_32.S
--- linux-2.6.32.17/arch/sparc/kernel/systbls_32.S	2010-08-09 18:04:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/kernel/systbls_32.S	2010-01-20 04:21:33.000000000 +0100
@@ -70,7 +70,7 @@ sys_call_table:
 /*250*/	.long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
 /*255*/	.long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
 /*260*/	.long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
-/*265*/	.long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
+/*265*/	.long sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
 /*270*/	.long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
 /*275*/	.long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
 /*280*/	.long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
diff -NurpP --minimal linux-2.6.32.17/arch/sparc/kernel/systbls_64.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/kernel/systbls_64.S
--- linux-2.6.32.17/arch/sparc/kernel/systbls_64.S	2010-08-09 18:04:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/sparc/kernel/systbls_64.S	2010-01-20 04:21:33.000000000 +0100
@@ -71,7 +71,7 @@ sys_call_table32:
 /*250*/	.word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
 	.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
 /*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
-	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
+	.word sys_timer_delete, compat_sys_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy
 /*270*/	.word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
 	.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
 /*280*/	.word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
@@ -146,7 +146,7 @@ sys_call_table:
 /*250*/	.word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
 	.word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
 /*260*/	.word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
-	.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
+	.word sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
 /*270*/	.word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
 	.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
 /*280*/	.word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
diff -NurpP --minimal linux-2.6.32.17/arch/um/include/asm/tlb.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/um/include/asm/tlb.h
--- linux-2.6.32.17/arch/um/include/asm/tlb.h	2009-09-10 15:25:46.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/um/include/asm/tlb.h	2010-03-13 21:49:04.000000000 +0100
@@ -3,6 +3,7 @@
 
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+#include <linux/vs_memory.h>
 #include <asm/percpu.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
diff -NurpP --minimal linux-2.6.32.17/arch/um/Kconfig.rest linux-2.6.32.17-vs2.3.0.36.29.4/arch/um/Kconfig.rest
--- linux-2.6.32.17/arch/um/Kconfig.rest	2009-06-11 17:12:19.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/um/Kconfig.rest	2010-03-13 21:49:04.000000000 +0100
@@ -18,6 +18,8 @@ source "drivers/connector/Kconfig"
 
 source "fs/Kconfig"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/x86/ia32/ia32entry.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/ia32/ia32entry.S
--- linux-2.6.32.17/arch/x86/ia32/ia32entry.S	2010-08-09 18:04:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/ia32/ia32entry.S	2010-01-20 04:21:33.000000000 +0100
@@ -777,7 +777,7 @@ ia32_sys_call_table:
 	.quad sys_tgkill		/* 270 */
 	.quad compat_sys_utimes
 	.quad sys32_fadvise64_64
-	.quad quiet_ni_syscall	/* sys_vserver */
+	.quad sys32_vserver
 	.quad sys_mbind
 	.quad compat_sys_get_mempolicy	/* 275 */
 	.quad sys_set_mempolicy
diff -NurpP --minimal linux-2.6.32.17/arch/x86/include/asm/unistd_64.h linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/include/asm/unistd_64.h
--- linux-2.6.32.17/arch/x86/include/asm/unistd_64.h	2009-12-03 20:02:16.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/include/asm/unistd_64.h	2009-12-03 20:04:56.000000000 +0100
@@ -535,7 +535,7 @@ __SYSCALL(__NR_tgkill, sys_tgkill)
 #define __NR_utimes				235
 __SYSCALL(__NR_utimes, sys_utimes)
 #define __NR_vserver				236
-__SYSCALL(__NR_vserver, sys_ni_syscall)
+__SYSCALL(__NR_vserver, sys_vserver)
 #define __NR_mbind				237
 __SYSCALL(__NR_mbind, sys_mbind)
 #define __NR_set_mempolicy			238
diff -NurpP --minimal linux-2.6.32.17/arch/x86/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/Kconfig
--- linux-2.6.32.17/arch/x86/Kconfig	2010-08-09 18:04:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/Kconfig	2010-05-21 13:20:18.000000000 +0200
@@ -2084,6 +2084,8 @@ source "fs/Kconfig"
 
 source "arch/x86/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -NurpP --minimal linux-2.6.32.17/arch/x86/kernel/syscall_table_32.S linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/kernel/syscall_table_32.S
--- linux-2.6.32.17/arch/x86/kernel/syscall_table_32.S	2010-08-09 18:04:09.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/x86/kernel/syscall_table_32.S	2010-01-20 04:21:33.000000000 +0100
@@ -272,7 +272,7 @@ ENTRY(sys_call_table)
 	.long sys_tgkill	/* 270 */
 	.long sys_utimes
  	.long sys_fadvise64_64
-	.long sys_ni_syscall	/* sys_vserver */
+	.long sys_vserver
 	.long sys_mbind
 	.long sys_get_mempolicy
 	.long sys_set_mempolicy
diff -NurpP --minimal linux-2.6.32.17/arch/xtensa/mm/fault.c linux-2.6.32.17-vs2.3.0.36.29.4/arch/xtensa/mm/fault.c
--- linux-2.6.32.17/arch/xtensa/mm/fault.c	2009-09-10 15:25:48.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/arch/xtensa/mm/fault.c	2009-12-03 20:04:56.000000000 +0100
@@ -151,7 +151,8 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", current->comm);
+	printk("VM: killing process %s(%d:#%u)\n",
+		current->comm, task_pid_nr(current), current->xid);
 	if (user_mode(regs))
 		do_group_exit(SIGKILL);
 	bad_page_fault(regs, address, SIGKILL);
diff -NurpP --minimal linux-2.6.32.17/Documentation/scheduler/sched-cfs-hard-limits.txt linux-2.6.32.17-vs2.3.0.36.29.4/Documentation/scheduler/sched-cfs-hard-limits.txt
--- linux-2.6.32.17/Documentation/scheduler/sched-cfs-hard-limits.txt	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/Documentation/scheduler/sched-cfs-hard-limits.txt	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,48 @@
+CPU HARD LIMITS FOR CFS GROUPS
+==============================
+
+1. Overview
+2. Interface
+3. Examples
+
+1. Overview
+-----------
+
+CFS is a proportional share scheduler which tries to divide the CPU time
+proportionately between tasks or groups of tasks (task group/cgroup) depending
+on the priority/weight of the task or shares assigned to groups of tasks.
+In CFS, a task/task group can get more than its share of CPU if there are
+enough idle CPU cycles available in the system, due to the work-conserving
+nature of the scheduler. However, in certain scenarios (like pay-per-use),
+it is desirable not to provide extra time to a group even in the presence
+of idle CPU cycles. This is where hard limiting can be of use.
+
+Hard limits for task groups can be set by specifying how much CPU runtime a
+group can consume within a given period. If the group consumes more CPU time
+than the runtime in a given period, it gets throttled. None of the tasks of
+the throttled group gets to run until the runtime of the group gets refreshed
+at the beginning of the next period.
+
+2. Interface
+------------
+
+The hard limit feature adds 2 cgroup files for the CFS group scheduler:
+
+cfs_runtime_us: Hard limit for the group in microseconds.
+
+cfs_period_us: Time period in microseconds within which the hard limit is
+enforced.
+
+A group is created with default values for runtime (infinite runtime, which
+means hard limits are disabled) and period (0.5s). Each group can set its own
+values for runtime and period independently of other groups in the system.
+
+3. Examples
+-----------
+
+# mount -t cgroup -ocpu none /cgroups/
+# cd /cgroups
+# mkdir 1
+# cd 1/
+# echo 250000 > cfs_runtime_us /* set a 250ms runtime or limit */
+# echo 500000 > cfs_period_us /* set a 500ms period */
diff -NurpP --minimal linux-2.6.32.17/Documentation/vserver/debug.txt linux-2.6.32.17-vs2.3.0.36.29.4/Documentation/vserver/debug.txt
--- linux-2.6.32.17/Documentation/vserver/debug.txt	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/Documentation/vserver/debug.txt	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,154 @@
+
+debug_cvirt:
+
+ 2   4	"vx_map_tgid: %p/%llx: %d -> %d"
+	"vx_rmap_tgid: %p/%llx: %d -> %d"
+
+debug_dlim:
+
+ 0   1	"ALLOC (%p,#%d)%c inode (%d)"
+	"FREE  (%p,#%d)%c inode"
+ 1   2	"ALLOC (%p,#%d)%c %lld bytes (%d)"
+	"FREE  (%p,#%d)%c %lld bytes"
+ 2   4	"ADJUST: %lld,%lld on %ld,%ld [mult=%d]"
+ 3   8	"ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d"
+	"ext3_has_free_blocks(%p): free=%lu, root=%lu"
+	"rcu_free_dl_info(%p)"
+ 4  10	"alloc_dl_info(%p,%d) = %p"
+	"dealloc_dl_info(%p)"
+	"get_dl_info(%p[#%d.%d])"
+	"put_dl_info(%p[#%d.%d])"
+ 5  20	"alloc_dl_info(%p,%d)*"
+ 6  40	"__hash_dl_info: %p[#%d]"
+	"__unhash_dl_info: %p[#%d]"
+ 7  80	"locate_dl_info(%p,#%d) = %p"
+
+debug_misc:
+
+ 0   1	"destroy_dqhash: %p [#0x%08x] c=%d"
+	"new_dqhash: %p [#0x%08x]"
+	"vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]"
+	"vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]"
+	"vroot[%d]_set_dev: dev=%p[%lu,%d:%d]"
+	"vroot_get_real_bdev not set"
+ 1   2	"cow_break_link(»%s«)"
+	"temp copy »%s«"
+ 2   4	"dentry_open(new): %p"
+	"dentry_open(old): %p"
+	"lookup_create(new): %p"
+	"old path »%s«"
+	"path_lookup(old): %d"
+	"vfs_create(new): %d"
+	"vfs_rename: %d"
+	"vfs_sendfile: %d"
+ 3   8	"fput(new_file=%p[#%d])"
+	"fput(old_file=%p[#%d])"
+ 4  10	"vx_info_kill(%p[#%d],%d,%d) = %d"
+	"vx_info_kill(%p[#%d],%d,%d)*"
+ 5  20	"vs_reboot(%p[#%d],%d)"
+ 6  40	"dropping task %p[#%u,%u] for %p[#%u,%u]"
+
+debug_net:
+
+ 2   4	"nx_addr_conflict(%p,%p) %d.%d,%d.%d"
+ 3   8	"inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d"
+	"inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d"
+ 4  10	"ip_route_connect(%p) %p,%p;%lx"
+ 5  20	"__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx"
+ 6  40	"sk,egf: %p [#%d] (from %d)"
+	"sk,egn: %p [#%d] (from %d)"
+	"sk,req: %p [#%d] (from %d)"
+	"sk: %p [#%d] (from %d)"
+	"tw: %p [#%d] (from %d)"
+ 7  80	"__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
+	"__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
+
+debug_nid:
+
+ 0   1	"__lookup_nx_info(#%u): %p[#%u]"
+	"alloc_nx_info(%d) = %p"
+	"create_nx_info(%d) (dynamic rejected)"
+	"create_nx_info(%d) = %p (already there)"
+	"create_nx_info(%d) = %p (new)"
+	"dealloc_nx_info(%p)"
+ 1   2	"alloc_nx_info(%d)*"
+	"create_nx_info(%d)*"
+ 2   4	"get_nx_info(%p[#%d.%d])"
+	"put_nx_info(%p[#%d.%d])"
+ 3   8	"claim_nx_info(%p[#%d.%d.%d]) %p"
+	"clr_nx_info(%p[#%d.%d])"
+	"init_nx_info(%p[#%d.%d])"
+	"release_nx_info(%p[#%d.%d.%d]) %p"
+	"set_nx_info(%p[#%d.%d])"
+ 4  10	"__hash_nx_info: %p[#%d]"
+	"__nx_dynamic_id: [#%d]"
+	"__unhash_nx_info: %p[#%d.%d.%d]"
+ 5  20	"moved task %p into nxi:%p[#%d]"
+	"nx_migrate_task(%p,%p[#%d.%d.%d])"
+	"task_get_nx_info(%p)"
+ 6  40	"nx_clear_persistent(%p[#%d])"
+
+debug_quota:
+
+ 0   1	"quota_sync_dqh(%p,%d) discard inode %p"
+ 1   2	"quota_sync_dqh(%p,%d)"
+	"sync_dquots(%p,%d)"
+	"sync_dquots_dqh(%p,%d)"
+ 3   8	"do_quotactl(%p,%d,cmd=%d,id=%d,%p)"
+
+debug_switch:
+
+ 0   1	"vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]"
+ 1   2	"vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]"
+ 4  10	"%s: (%s %s) returned %s with %d"
+
+debug_tag:
+
+ 7  80	"dx_parse_tag(»%s«): %d:#%d"
+	"dx_propagate_tag(%p[#%lu.%d]): %d,%d"
+
+debug_xid:
+
+ 0   1	"__lookup_vx_info(#%u): %p[#%u]"
+	"alloc_vx_info(%d) = %p"
+	"alloc_vx_info(%d)*"
+	"create_vx_info(%d) (dynamic rejected)"
+	"create_vx_info(%d) = %p (already there)"
+	"create_vx_info(%d) = %p (new)"
+	"dealloc_vx_info(%p)"
+	"loc_vx_info(%d) = %p (found)"
+	"loc_vx_info(%d) = %p (new)"
+	"loc_vx_info(%d) = %p (not available)"
+ 1   2	"create_vx_info(%d)*"
+	"loc_vx_info(%d)*"
+ 2   4	"get_vx_info(%p[#%d.%d])"
+	"put_vx_info(%p[#%d.%d])"
+ 3   8	"claim_vx_info(%p[#%d.%d.%d]) %p"
+	"clr_vx_info(%p[#%d.%d])"
+	"init_vx_info(%p[#%d.%d])"
+	"release_vx_info(%p[#%d.%d.%d]) %p"
+	"set_vx_info(%p[#%d.%d])"
+ 4  10	"__hash_vx_info: %p[#%d]"
+	"__unhash_vx_info: %p[#%d.%d.%d]"
+	"__vx_dynamic_id: [#%d]"
+ 5  20	"enter_vx_info(%p[#%d],%p) %p[#%d,%p]"
+	"leave_vx_info(%p[#%d,%p]) %p[#%d,%p]"
+	"moved task %p into vxi:%p[#%d]"
+	"task_get_vx_info(%p)"
+	"vx_migrate_task(%p,%p[#%d.%d])"
+ 6  40	"vx_clear_persistent(%p[#%d])"
+	"vx_exit_init(%p[#%d],%p[#%d,%d,%d])"
+	"vx_set_init(%p[#%d],%p[#%d,%d,%d])"
+	"vx_set_persistent(%p[#%d])"
+	"vx_set_reaper(%p[#%d],%p[#%d,%d])"
+ 7  80	"vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]"
+
+
+debug_limit:
+
+ n 2^n	"vx_acc_cres[%5d,%s,%2d]: %5d%s"
+	"vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
+
+ m 2^m	"vx_acc_page[%5d,%s,%2d]: %5d%s"
+	"vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
+	"vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
diff -NurpP --minimal linux-2.6.32.17/drivers/block/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/Kconfig
--- linux-2.6.32.17/drivers/block/Kconfig	2009-09-10 15:25:49.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -271,6 +271,13 @@ config BLK_DEV_CRYPTOLOOP
 	  instead, which can be configured to be on-disk compatible with the
 	  cryptoloop device.
 
+config BLK_DEV_VROOT
+	tristate "Virtual Root device support"
+	depends on QUOTACTL
+	---help---
+	  Saying Y here will allow you to use quota/fs ioctls on a shared
+	  partition within a virtual server without compromising security.
+
 config BLK_DEV_NBD
 	tristate "Network block device support"
 	depends on NET
diff -NurpP --minimal linux-2.6.32.17/drivers/block/loop.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/loop.c
--- linux-2.6.32.17/drivers/block/loop.c	2009-12-03 20:02:19.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/loop.c	2009-12-03 20:04:56.000000000 +0100
@@ -74,6 +74,7 @@
 #include <linux/gfp.h>
 #include <linux/kthread.h>
 #include <linux/splice.h>
+#include <linux/vs_context.h>
 
 #include <asm/uaccess.h>
 
@@ -812,6 +813,7 @@ static int loop_set_fd(struct loop_devic
 	lo->lo_blocksize = lo_blocksize;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
+	lo->lo_xid = vx_current_xid();
 	lo->lo_backing_file = file;
 	lo->transfer = transfer_none;
 	lo->ioctl = NULL;
@@ -937,6 +939,7 @@ static int loop_clr_fd(struct loop_devic
 	lo->lo_encrypt_key_size = 0;
 	lo->lo_flags = 0;
 	lo->lo_thread = NULL;
+	lo->lo_xid = 0;
 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -971,7 +974,7 @@ loop_set_status(struct loop_device *lo, 
 
 	if (lo->lo_encrypt_key_size &&
 	    lo->lo_key_owner != uid &&
-	    !capable(CAP_SYS_ADMIN))
+	    !vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP))
 		return -EPERM;
 	if (lo->lo_state != Lo_bound)
 		return -ENXIO;
@@ -1055,7 +1058,8 @@ loop_get_status(struct loop_device *lo, 
 	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
 	info->lo_encrypt_type =
 		lo->lo_encryption ? lo->lo_encryption->number : 0;
-	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
+	if (lo->lo_encrypt_key_size &&
+		vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) {
 		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
 		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
 		       lo->lo_encrypt_key_size);
@@ -1399,6 +1403,9 @@ static int lo_open(struct block_device *
 {
 	struct loop_device *lo = bdev->bd_disk->private_data;
 
+	if (!vx_check(lo->lo_xid, VS_IDENT|VS_HOSTID|VS_ADMIN_P))
+		return -EACCES;
+
 	mutex_lock(&lo->lo_ctl_mutex);
 	lo->lo_refcnt++;
 	mutex_unlock(&lo->lo_ctl_mutex);
diff -NurpP --minimal linux-2.6.32.17/drivers/block/Makefile linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/Makefile
--- linux-2.6.32.17/drivers/block/Makefile	2009-09-10 15:25:49.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/Makefile	2009-12-03 20:04:56.000000000 +0100
@@ -34,6 +34,7 @@ obj-$(CONFIG_VIODASD)		+= viodasd.o
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
 obj-$(CONFIG_BLK_DEV_HD)	+= hd.o
+obj-$(CONFIG_BLK_DEV_VROOT)	+= vroot.o
 
 obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= xen-blkfront.o
 
diff -NurpP --minimal linux-2.6.32.17/drivers/block/vroot.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/vroot.c
--- linux-2.6.32.17/drivers/block/vroot.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/block/vroot.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,281 @@
+/*
+ *  linux/drivers/block/vroot.c
+ *
+ *  written by Herbert Pötzl, 9/11/2002
+ *  ported to 2.6.10 by Herbert Pötzl, 30/12/2004
+ *
+ *  based on the loop.c code by Theodore Ts'o.
+ *
+ * Copyright (C) 2002-2007 by Herbert Pötzl.
+ * Redistribution of this file is permitted under the
+ * GNU General Public License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/file.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+
+#include <linux/vroot.h>
+#include <linux/vs_context.h>
+
+
+static int max_vroot = 8;
+
+static struct vroot_device *vroot_dev;
+static struct gendisk **disks;
+
+
+static int vroot_set_dev(
+	struct vroot_device *vr,
+	struct block_device *bdev,
+	unsigned int arg)
+{
+	struct block_device *real_bdev;
+	struct file *file;
+	struct inode *inode;
+	int error;
+
+	error = -EBUSY;
+	if (vr->vr_state != Vr_unbound)
+		goto out;
+
+	error = -EBADF;
+	file = fget(arg);
+	if (!file)
+		goto out;
+
+	error = -EINVAL;
+	inode = file->f_dentry->d_inode;
+
+
+	if (S_ISBLK(inode->i_mode)) {
+		real_bdev = inode->i_bdev;
+		vr->vr_device = real_bdev;
+		__iget(real_bdev->bd_inode);
+	} else
+		goto out_fput;
+
+	vxdprintk(VXD_CBIT(misc, 0),
+		"vroot[%d]_set_dev: dev=" VXF_DEV,
+		vr->vr_number, VXD_DEV(real_bdev));
+
+	vr->vr_state = Vr_bound;
+	error = 0;
+
+ out_fput:
+	fput(file);
+ out:
+	return error;
+}
+
+static int vroot_clr_dev(
+	struct vroot_device *vr,
+	struct block_device *bdev)
+{
+	struct block_device *real_bdev;
+
+	if (vr->vr_state != Vr_bound)
+		return -ENXIO;
+	if (vr->vr_refcnt > 1)	/* we needed one fd for the ioctl */
+		return -EBUSY;
+
+	real_bdev = vr->vr_device;
+
+	vxdprintk(VXD_CBIT(misc, 0),
+		"vroot[%d]_clr_dev: dev=" VXF_DEV,
+		vr->vr_number, VXD_DEV(real_bdev));
+
+	bdput(real_bdev);
+	vr->vr_state = Vr_unbound;
+	vr->vr_device = NULL;
+	return 0;
+}
+
+
+static int vr_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	struct vroot_device *vr = bdev->bd_disk->private_data;
+	int err;
+
+	down(&vr->vr_ctl_mutex);
+	switch (cmd) {
+	case VROOT_SET_DEV:
+		err = vroot_set_dev(vr, bdev, arg);
+		break;
+	case VROOT_CLR_DEV:
+		err = vroot_clr_dev(vr, bdev);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+	up(&vr->vr_ctl_mutex);
+	return err;
+}
+
+static int vr_open(struct block_device *bdev, fmode_t mode)
+{
+	struct vroot_device *vr = bdev->bd_disk->private_data;
+
+	down(&vr->vr_ctl_mutex);
+	vr->vr_refcnt++;
+	up(&vr->vr_ctl_mutex);
+	return 0;
+}
+
+static int vr_release(struct gendisk *disk, fmode_t mode)
+{
+	struct vroot_device *vr = disk->private_data;
+
+	down(&vr->vr_ctl_mutex);
+	--vr->vr_refcnt;
+	up(&vr->vr_ctl_mutex);
+	return 0;
+}
+
+static struct block_device_operations vr_fops = {
+	.owner =	THIS_MODULE,
+	.open =		vr_open,
+	.release =	vr_release,
+	.ioctl =	vr_ioctl,
+};
+
+struct block_device *__vroot_get_real_bdev(struct block_device *bdev)
+{
+	struct inode *inode = bdev->bd_inode;
+	struct vroot_device *vr;
+	struct block_device *real_bdev;
+	int minor = iminor(inode);
+
+	vr = &vroot_dev[minor];
+	real_bdev = vr->vr_device;
+
+	vxdprintk(VXD_CBIT(misc, 0),
+		"vroot[%d]_get_real_bdev: dev=" VXF_DEV,
+		vr->vr_number, VXD_DEV(real_bdev));
+
+	if (vr->vr_state != Vr_bound)
+		return ERR_PTR(-ENXIO);
+
+	__iget(real_bdev->bd_inode);
+	return real_bdev;
+}
+
+/*
+ * And now the modules code and kernel interface.
+ */
+
+module_param(max_vroot, int, 0);
+
+MODULE_PARM_DESC(max_vroot, "Maximum number of vroot devices (1-256)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(VROOT_MAJOR);
+
+MODULE_AUTHOR ("Herbert Pötzl");
+MODULE_DESCRIPTION ("Virtual Root Device Mapper");
+
+
+int __init vroot_init(void)
+{
+	int err, i;
+
+	if (max_vroot < 1 || max_vroot > 256) {
+		max_vroot = MAX_VROOT_DEFAULT;
+		printk(KERN_WARNING "vroot: invalid max_vroot "
+			"(must be between 1 and 256), "
+			"using default (%d)\n", max_vroot);
+	}
+
+	if (register_blkdev(VROOT_MAJOR, "vroot"))
+		return -EIO;
+
+	err = -ENOMEM;
+	vroot_dev = kmalloc(max_vroot * sizeof(struct vroot_device), GFP_KERNEL);
+	if (!vroot_dev)
+		goto out_mem1;
+	memset(vroot_dev, 0, max_vroot * sizeof(struct vroot_device));
+
+	disks = kmalloc(max_vroot * sizeof(struct gendisk *), GFP_KERNEL);
+	if (!disks)
+		goto out_mem2;
+
+	for (i = 0; i < max_vroot; i++) {
+		disks[i] = alloc_disk(1);
+		if (!disks[i])
+			goto out_mem3;
+		disks[i]->queue = blk_alloc_queue(GFP_KERNEL);
+		if (!disks[i]->queue)
+			goto out_mem3;
+	}
+
+	for (i = 0; i < max_vroot; i++) {
+		struct vroot_device *vr = &vroot_dev[i];
+		struct gendisk *disk = disks[i];
+
+		memset(vr, 0, sizeof(*vr));
+		init_MUTEX(&vr->vr_ctl_mutex);
+		vr->vr_number = i;
+		disk->major = VROOT_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &vr_fops;
+		sprintf(disk->disk_name, "vroot%d", i);
+		disk->private_data = vr;
+	}
+
+	err = register_vroot_grb(&__vroot_get_real_bdev);
+	if (err)
+		goto out_mem3;
+
+	for (i = 0; i < max_vroot; i++)
+		add_disk(disks[i]);
+	printk(KERN_INFO "vroot: loaded (max %d devices)\n", max_vroot);
+	return 0;
+
+out_mem3:
+	while (i--)
+		put_disk(disks[i]);
+	kfree(disks);
+out_mem2:
+	kfree(vroot_dev);
+out_mem1:
+	unregister_blkdev(VROOT_MAJOR, "vroot");
+	printk(KERN_ERR "vroot: ran out of memory\n");
+	return err;
+}
+
+void vroot_exit(void)
+{
+	int i;
+
+	if (unregister_vroot_grb(&__vroot_get_real_bdev))
+		printk(KERN_WARNING "vroot: cannot unregister grb\n");
+
+	for (i = 0; i < max_vroot; i++) {
+		del_gendisk(disks[i]);
+		put_disk(disks[i]);
+	}
+	unregister_blkdev(VROOT_MAJOR, "vroot");
+
+	kfree(disks);
+	kfree(vroot_dev);
+}
+
+module_init(vroot_init);
+module_exit(vroot_exit);
+
+#ifndef MODULE
+
+static int __init max_vroot_setup(char *str)
+{
+	max_vroot = simple_strtol(str, NULL, 0);
+	return 1;
+}
+
+__setup("max_vroot=", max_vroot_setup);
+
+#endif
+
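As an illustration of the ioctl interface implemented above (not part of the patch), the sketch below binds a vroot device to a real block device with VROOT_SET_DEV, passing the file descriptor of the backing device as the ioctl argument, which is exactly what vroot_set_dev() expects via fget(). The device paths and the <linux/vroot.h> include path are assumptions about how the patched headers are installed.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vroot.h>	/* added by this patch; install path is an assumption */

int main(void)
{
	int vroot = open("/dev/vroot0", O_RDONLY);	/* node for minor 0, VROOT_MAJOR */
	int real = open("/dev/hdv1", O_RDONLY);		/* hypothetical backing partition */

	if (vroot < 0 || real < 0) {
		perror("open");
		return 1;
	}
	/* vroot_set_dev() does fget(arg), so the argument is the fd itself */
	if (ioctl(vroot, VROOT_SET_DEV, real) < 0) {
		perror("VROOT_SET_DEV");
		return 1;
	}
	close(real);	/* the driver keeps its own reference to the bdev */
	close(vroot);
	return 0;
}

VROOT_CLR_DEV undoes the binding, but per vroot_clr_dev() only while nothing except the controlling fd holds the device open.
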
diff -NurpP --minimal linux-2.6.32.17/drivers/char/sysrq.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/char/sysrq.c
--- linux-2.6.32.17/drivers/char/sysrq.c	2009-12-03 20:02:20.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/char/sysrq.c	2009-12-03 20:04:56.000000000 +0100
@@ -38,6 +38,7 @@
 #include <linux/workqueue.h>
 #include <linux/hrtimer.h>
 #include <linux/oom.h>
+#include <linux/vserver/debug.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
@@ -391,6 +392,21 @@ static struct sysrq_key_op sysrq_unrt_op
 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
 };
 
+
+#ifdef CONFIG_VSERVER_DEBUG
+static void sysrq_handle_vxinfo(int key, struct tty_struct *tty)
+{
+	dump_vx_info_inactive((key == 'x')?0:1);
+}
+
+static struct sysrq_key_op sysrq_showvxinfo_op = {
+	.handler	= sysrq_handle_vxinfo,
+	.help_msg	= "conteXt",
+	.action_msg	= "Show Context Info",
+	.enable_mask	= SYSRQ_ENABLE_DUMP,
+};
+#endif
+
 /* Key Operations table and lock */
 static DEFINE_SPINLOCK(sysrq_key_table_lock);
 
@@ -445,7 +461,11 @@ static struct sysrq_key_op *sysrq_key_ta
 	NULL,				/* v */
 	&sysrq_showstate_blocked_op,	/* w */
 	/* x: May be registered on ppc/powerpc for xmon */
+#ifdef CONFIG_VSERVER_DEBUG
+	&sysrq_showvxinfo_op,		/* x */
+#else
 	NULL,				/* x */
+#endif
 	/* y: May be registered on sparc64 for global register dump */
 	NULL,				/* y */
 	&sysrq_ftrace_dump_op,		/* z */
@@ -460,6 +480,8 @@ static int sysrq_key_table_key2index(int
 		retval = key - '0';
 	else if ((key >= 'a') && (key <= 'z'))
 		retval = key + 10 - 'a';
+	else if ((key >= 'A') && (key <= 'Z'))
+		retval = key + 10 - 'A';
 	else
 		retval = -1;
 	return retval;
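
With CONFIG_VSERVER_DEBUG enabled, the hunk above wires SysRq-x to dump inactive context info. A minimal userspace sketch (not part of the patch) triggers it as root through the standard /proc/sysrq-trigger interface:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sysrq-trigger", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sysrq-trigger");
		return 1;
	}
	/* 'x' invokes sysrq_handle_vxinfo() above and dumps inactive vx_info */
	if (write(fd, "x", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
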
diff -NurpP --minimal linux-2.6.32.17/drivers/char/tty_io.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/char/tty_io.c
--- linux-2.6.32.17/drivers/char/tty_io.c	2010-08-09 18:04:09.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/char/tty_io.c	2010-04-28 10:19:30.000000000 +0200
@@ -106,6 +106,7 @@
 
 #include <linux/kmod.h>
 #include <linux/nsproxy.h>
+#include <linux/vs_pid.h>
 
 #undef TTY_DEBUG_HANGUP
 
@@ -1970,7 +1971,8 @@ static int tiocsti(struct tty_struct *tt
 	char ch, mbz = 0;
 	struct tty_ldisc *ld;
 
-	if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+	if (((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) ||
+		!vx_ccaps(VXC_TIOCSTI))
 		return -EPERM;
 	if (get_user(ch, p))
 		return -EFAULT;
@@ -2258,6 +2260,7 @@ static int tiocspgrp(struct tty_struct *
 		return -ENOTTY;
 	if (get_user(pgrp_nr, p))
 		return -EFAULT;
+	pgrp_nr = vx_rmap_pid(pgrp_nr);
 	if (pgrp_nr < 0)
 		return -EINVAL;
 	rcu_read_lock();
diff -NurpP --minimal linux-2.6.32.17/drivers/infiniband/hw/ipath/ipath_user_pages.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/infiniband/hw/ipath/ipath_user_pages.c
--- linux-2.6.32.17/drivers/infiniband/hw/ipath/ipath_user_pages.c	2009-12-03 20:02:23.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/infiniband/hw/ipath/ipath_user_pages.c	2009-12-04 23:31:17.000000000 +0100
@@ -34,6 +34,7 @@
 #include <linux/mm.h>
 #include <linux/device.h>
 #include <linux/sched.h>
+#include <linux/vs_memory.h>
 
 #include "ipath_kernel.h"
 
@@ -62,7 +63,8 @@ static int __get_user_pages(unsigned lon
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
 		PAGE_SHIFT;
 
-	if (num_pages > lock_limit) {
+	if (num_pages > lock_limit ||
+		!vx_vmlocked_avail(current->mm, num_pages)) {
 		ret = -ENOMEM;
 		goto bail;
 	}
@@ -79,7 +81,7 @@ static int __get_user_pages(unsigned lon
 			goto bail_release;
 	}
 
-	current->mm->locked_vm += num_pages;
+	vx_vmlocked_add(current->mm, num_pages);
 
 	ret = 0;
 	goto bail;
@@ -178,7 +180,7 @@ void ipath_release_user_pages(struct pag
 
 	__ipath_release_user_pages(p, num_pages, 1);
 
-	current->mm->locked_vm -= num_pages;
+	vx_vmlocked_sub(current->mm, num_pages);
 
 	up_write(&current->mm->mmap_sem);
 }
@@ -195,7 +197,7 @@ static void user_pages_account(struct wo
 		container_of(_work, struct ipath_user_pages_work, work);
 
 	down_write(&work->mm->mmap_sem);
-	work->mm->locked_vm -= work->num_pages;
+	vx_vmlocked_sub(work->mm, work->num_pages);
 	up_write(&work->mm->mmap_sem);
 	mmput(work->mm);
 	kfree(work);
diff -NurpP --minimal linux-2.6.32.17/drivers/md/dm.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/md/dm.c
--- linux-2.6.32.17/drivers/md/dm.c	2010-08-09 18:04:10.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/md/dm.c	2010-04-28 10:19:30.000000000 +0200
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/hdreg.h>
+#include <linux/vs_base.h>
 
 #include <trace/events/block.h>
 
@@ -119,6 +120,7 @@ struct mapped_device {
 	rwlock_t map_lock;
 	atomic_t holders;
 	atomic_t open_count;
+	xid_t xid;
 
 	unsigned long flags;
 
@@ -323,6 +325,7 @@ static void __exit dm_exit(void)
 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mapped_device *md;
+	int ret = -ENXIO;
 
 	spin_lock(&_minor_lock);
 
@@ -331,18 +334,19 @@ static int dm_blk_open(struct block_devi
 		goto out;
 
 	if (test_bit(DMF_FREEING, &md->flags) ||
-	    test_bit(DMF_DELETING, &md->flags)) {
-		md = NULL;
+	    test_bit(DMF_DELETING, &md->flags))
+		goto out;
+
+	ret = -EACCES;
+	if (!vx_check(md->xid, VS_IDENT|VS_HOSTID))
 		goto out;
-	}
 
 	dm_get(md);
 	atomic_inc(&md->open_count);
-
+	ret = 0;
 out:
 	spin_unlock(&_minor_lock);
-
-	return md ? 0 : -ENXIO;
+	return ret;
 }
 
 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
@@ -553,6 +557,14 @@ int dm_set_geometry(struct mapped_device
 	return 0;
 }
 
+/*
+ * Get the xid associated with a dm device
+ */
+xid_t dm_get_xid(struct mapped_device *md)
+{
+	return md->xid;
+}
+
 /*-----------------------------------------------------------------
  * CRUD START:
  *   A more elegant soln is in the works that uses the queue
@@ -1775,6 +1787,7 @@ static struct mapped_device *alloc_dev(i
 	INIT_LIST_HEAD(&md->uevent_list);
 	spin_lock_init(&md->uevent_lock);
 
+	md->xid = vx_current_xid();
 	md->queue = blk_init_queue(dm_request_fn, NULL);
 	if (!md->queue)
 		goto bad_queue;
diff -NurpP --minimal linux-2.6.32.17/drivers/md/dm.h linux-2.6.32.17-vs2.3.0.36.29.4/drivers/md/dm.h
--- linux-2.6.32.17/drivers/md/dm.h	2009-09-10 15:25:55.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/md/dm.h	2009-12-03 20:04:56.000000000 +0100
@@ -41,6 +41,8 @@ struct dm_dev_internal {
 struct dm_table;
 struct dm_md_mempools;
 
+xid_t dm_get_xid(struct mapped_device *md);
+
 /*-----------------------------------------------------------------
  * Internal table functions.
  *---------------------------------------------------------------*/
diff -NurpP --minimal linux-2.6.32.17/drivers/md/dm-ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/md/dm-ioctl.c
--- linux-2.6.32.17/drivers/md/dm-ioctl.c	2010-08-09 18:04:10.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/md/dm-ioctl.c	2009-12-29 00:36:25.000000000 +0100
@@ -16,6 +16,7 @@
 #include <linux/dm-ioctl.h>
 #include <linux/hdreg.h>
 #include <linux/compat.h>
+#include <linux/vs_context.h>
 
 #include <asm/uaccess.h>
 
@@ -106,7 +107,8 @@ static struct hash_cell *__get_name_cell
 	unsigned int h = hash_str(str);
 
 	list_for_each_entry (hc, _name_buckets + h, name_list)
-		if (!strcmp(hc->name, str)) {
+		if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
+			!strcmp(hc->name, str)) {
 			dm_get(hc->md);
 			return hc;
 		}
@@ -120,7 +122,8 @@ static struct hash_cell *__get_uuid_cell
 	unsigned int h = hash_str(str);
 
 	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
-		if (!strcmp(hc->uuid, str)) {
+		if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
+			!strcmp(hc->uuid, str)) {
 			dm_get(hc->md);
 			return hc;
 		}
@@ -363,6 +366,9 @@ typedef int (*ioctl_fn)(struct dm_ioctl 
 
 static int remove_all(struct dm_ioctl *param, size_t param_size)
 {
+	if (!vx_check(0, VS_ADMIN))
+		return -EPERM;
+
 	dm_hash_remove_all(1);
 	param->data_size = 0;
 	return 0;
@@ -410,6 +416,8 @@ static int list_devices(struct dm_ioctl 
 	 */
 	for (i = 0; i < NUM_BUCKETS; i++) {
 		list_for_each_entry (hc, _name_buckets + i, name_list) {
+			if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
+				continue;
 			needed += sizeof(struct dm_name_list);
 			needed += strlen(hc->name) + 1;
 			needed += ALIGN_MASK;
@@ -433,6 +441,8 @@ static int list_devices(struct dm_ioctl 
 	 */
 	for (i = 0; i < NUM_BUCKETS; i++) {
 		list_for_each_entry (hc, _name_buckets + i, name_list) {
+			if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
+				continue;
 			if (old_nl)
 				old_nl->next = (uint32_t) ((void *) nl -
 							   (void *) old_nl);
@@ -623,10 +633,11 @@ static struct hash_cell *__find_device_h
 	if (!md)
 		goto out;
 
-	mdptr = dm_get_mdptr(md);
+	if (vx_check(dm_get_xid(md), VS_WATCH_P | VS_IDENT))
+		mdptr = dm_get_mdptr(md);
+
 	if (!mdptr)
 		dm_put(md);
-
 out:
 	return mdptr;
 }
@@ -1456,8 +1467,8 @@ static int ctl_ioctl(uint command, struc
 	ioctl_fn fn = NULL;
 	size_t param_size;
 
-	/* only root can play with this */
-	if (!capable(CAP_SYS_ADMIN))
+	/* only root and certain contexts can play with this */
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_MAPPER))
 		return -EACCES;
 
 	if (_IOC_TYPE(command) != DM_IOCTL)
diff -NurpP --minimal linux-2.6.32.17/drivers/net/tun.c linux-2.6.32.17-vs2.3.0.36.29.4/drivers/net/tun.c
--- linux-2.6.32.17/drivers/net/tun.c	2009-12-03 20:02:32.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/drivers/net/tun.c	2009-12-03 20:04:56.000000000 +0100
@@ -61,6 +61,7 @@
 #include <linux/crc32.h>
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
+#include <linux/vs_network.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
@@ -102,6 +103,7 @@ struct tun_struct {
 	unsigned int 		flags;
 	uid_t			owner;
 	gid_t			group;
+	nid_t			nid;
 
 	struct net_device	*dev;
 	struct fasync_struct	*fasync;
@@ -816,6 +818,7 @@ static void tun_setup(struct net_device 
 
 	tun->owner = -1;
 	tun->group = -1;
+	tun->nid = current->nid;
 
 	dev->ethtool_ops = &tun_ethtool_ops;
 	dev->destructor = tun_free_netdev;
@@ -932,7 +935,7 @@ static int tun_set_iff(struct net *net, 
 
 		if (((tun->owner != -1 && cred->euid != tun->owner) ||
 		     (tun->group != -1 && !in_egroup_p(tun->group))) &&
-		    !capable(CAP_NET_ADMIN))
+		!cap_raised(current_cap(), CAP_NET_ADMIN))
 			return -EPERM;
 		err = security_tun_dev_attach(tun->socket.sk);
 		if (err < 0)
@@ -946,7 +949,7 @@ static int tun_set_iff(struct net *net, 
 		char *name;
 		unsigned long flags = 0;
 
-		if (!capable(CAP_NET_ADMIN))
+		if (!nx_capable(CAP_NET_ADMIN, NXC_TUN_CREATE))
 			return -EPERM;
 		err = security_tun_dev_create();
 		if (err < 0)
@@ -1013,6 +1016,9 @@ static int tun_set_iff(struct net *net, 
 
 		sk->sk_destruct = tun_sock_destruct;
 
+		if (!nx_check(tun->nid, VS_IDENT | VS_HOSTID | VS_ADMIN_P))
+			return -EPERM;
+
 		err = tun_attach(tun, file);
 		if (err < 0)
 			goto failed;
@@ -1202,6 +1208,16 @@ static long tun_chr_ioctl(struct file *f
 		DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
 		break;
 
+	case TUNSETNID:
+		if (!capable(CAP_CONTEXT))
+			return -EPERM;
+
+		/* Set nid owner of the device */
+		tun->nid = (nid_t) arg;
+
+		DBG(KERN_INFO "%s: nid owner set to %u\n", tun->dev->name, tun->nid);
+		break;
+
 	case TUNSETLINK:
 		/* Only allow setting the type when the interface is down */
 		if (tun->dev->flags & IFF_UP) {
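
A hedged userspace sketch (not part of the patch) of the new TUNSETNID ioctl: it attaches a tun interface and then hands it to network context 42. It assumes the patched <linux/if_tun.h> exporting TUNSETNID is installed and that the caller holds CAP_CONTEXT, as required by the hunk above; the interface name and nid are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>	/* assumed to carry TUNSETNID with this patch applied */

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		return 1;
	}
	/* hand the device over to nid 42; needs CAP_CONTEXT on this kernel */
	if (ioctl(fd, TUNSETNID, 42) < 0)
		perror("TUNSETNID");
	close(fd);
	return 0;
}
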
diff -NurpP --minimal linux-2.6.32.17/fs/attr.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/attr.c
--- linux-2.6.32.17/fs/attr.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/attr.c	2009-12-03 20:04:56.000000000 +0100
@@ -14,6 +14,9 @@
 #include <linux/fcntl.h>
 #include <linux/quotaops.h>
 #include <linux/security.h>
+#include <linux/proc_fs.h>
+#include <linux/devpts_fs.h>
+#include <linux/vs_tag.h>
 
 /* Taken over from the old code... */
 
@@ -55,6 +58,10 @@ int inode_change_ok(const struct inode *
 		if (!is_owner_or_cap(inode))
 			goto error;
 	}
+
+	if (dx_permission(inode, MAY_WRITE))
+		goto error;
+
 fine:
 	retval = 0;
 error:
@@ -120,6 +127,8 @@ int inode_setattr(struct inode * inode, 
 		inode->i_uid = attr->ia_uid;
 	if (ia_valid & ATTR_GID)
 		inode->i_gid = attr->ia_gid;
+	if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+		inode->i_tag = attr->ia_tag;
 	if (ia_valid & ATTR_ATIME)
 		inode->i_atime = timespec_trunc(attr->ia_atime,
 						inode->i_sb->s_time_gran);
@@ -214,7 +223,8 @@ int notify_change(struct dentry * dentry
 		error = inode_change_ok(inode, attr);
 		if (!error) {
 			if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
+			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+			    (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag))
 				error = vfs_dq_transfer(inode, attr) ?
 					-EDQUOT : 0;
 			if (!error)
diff -NurpP --minimal linux-2.6.32.17/fs/binfmt_aout.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_aout.c
--- linux-2.6.32.17/fs/binfmt_aout.c	2010-08-09 18:04:13.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_aout.c	2010-02-12 10:59:55.000000000 +0100
@@ -24,6 +24,7 @@
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 #include <linux/init.h>
+#include <linux/vs_memory.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
diff -NurpP --minimal linux-2.6.32.17/fs/binfmt_elf.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_elf.c
--- linux-2.6.32.17/fs/binfmt_elf.c	2010-08-09 18:04:13.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_elf.c	2010-02-12 10:59:55.000000000 +0100
@@ -31,6 +31,7 @@
 #include <linux/random.h>
 #include <linux/elf.h>
 #include <linux/utsname.h>
+#include <linux/vs_memory.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>
diff -NurpP --minimal linux-2.6.32.17/fs/binfmt_flat.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_flat.c
--- linux-2.6.32.17/fs/binfmt_flat.c	2010-08-09 18:04:13.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_flat.c	2010-02-12 10:59:55.000000000 +0100
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/flat.h>
 #include <linux/syscalls.h>
+#include <linux/vs_memory.h>
 
 #include <asm/byteorder.h>
 #include <asm/system.h>
diff -NurpP --minimal linux-2.6.32.17/fs/binfmt_som.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_som.c
--- linux-2.6.32.17/fs/binfmt_som.c	2010-08-09 18:04:13.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/binfmt_som.c	2010-02-12 10:59:55.000000000 +0100
@@ -28,6 +28,7 @@
 #include <linux/shm.h>
 #include <linux/personality.h>
 #include <linux/init.h>
+#include <linux/vs_memory.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
diff -NurpP --minimal linux-2.6.32.17/fs/block_dev.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/block_dev.c
--- linux-2.6.32.17/fs/block_dev.c	2010-08-09 18:04:13.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/block_dev.c	2010-04-28 10:19:32.000000000 +0200
@@ -26,6 +26,7 @@
 #include <linux/namei.h>
 #include <linux/log2.h>
 #include <linux/kmemleak.h>
+#include <linux/vs_device.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -557,6 +558,7 @@ struct block_device *bdget(dev_t dev)
 		bdev->bd_invalidated = 0;
 		inode->i_mode = S_IFBLK;
 		inode->i_rdev = dev;
+		inode->i_mdev = dev;
 		inode->i_bdev = bdev;
 		inode->i_data.a_ops = &def_blk_aops;
 		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
@@ -603,6 +605,11 @@ EXPORT_SYMBOL(bdput);
 static struct block_device *bd_acquire(struct inode *inode)
 {
 	struct block_device *bdev;
+	dev_t mdev;
+
+	if (!vs_map_blkdev(inode->i_rdev, &mdev, DATTR_OPEN))
+		return NULL;
+	inode->i_mdev = mdev;
 
 	spin_lock(&bdev_lock);
 	bdev = inode->i_bdev;
@@ -613,7 +620,7 @@ static struct block_device *bd_acquire(s
 	}
 	spin_unlock(&bdev_lock);
 
-	bdev = bdget(inode->i_rdev);
+	bdev = bdget(mdev);
 	if (bdev) {
 		spin_lock(&bdev_lock);
 		if (!inode->i_bdev) {
diff -NurpP --minimal linux-2.6.32.17/fs/btrfs/ctree.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/ctree.h
--- linux-2.6.32.17/fs/btrfs/ctree.h	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/ctree.h	2009-12-03 20:04:56.000000000 +0100
@@ -544,11 +544,14 @@ struct btrfs_inode_item {
 	/* modification sequence number for NFS */
 	__le64 sequence;
 
+	__le16 tag;
 	/*
 	 * a little future expansion, for more than this we can
 	 * just grow the inode item and version it
 	 */
-	__le64 reserved[4];
+	__le16 reserved16;
+	__le32 reserved32;
+	__le64 reserved[3];
 	struct btrfs_timespec atime;
 	struct btrfs_timespec ctime;
 	struct btrfs_timespec mtime;
@@ -1155,6 +1158,8 @@ struct btrfs_root {
 #define BTRFS_MOUNT_NOSSD		(1 << 9)
 #define BTRFS_MOUNT_DISCARD		(1 << 10)
 
+#define BTRFS_MOUNT_TAGGED		(1 << 24)
+
 #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
 #define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
@@ -1174,6 +1179,10 @@ struct btrfs_root {
 #define BTRFS_INODE_NOATIME		(1 << 9)
 #define BTRFS_INODE_DIRSYNC		(1 << 10)
 
+#define BTRFS_INODE_IXUNLINK		(1 << 24)
+#define BTRFS_INODE_BARRIER		(1 << 25)
+#define BTRFS_INODE_COW			(1 << 26)
+
 
 /* some macros to generate set/get funcs for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
@@ -1376,6 +1385,7 @@ BTRFS_SETGET_FUNCS(inode_block_group, st
 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_FUNCS(inode_tag, struct btrfs_inode_item, tag, 16);
 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
@@ -2338,6 +2348,7 @@ extern const struct dentry_operations bt
 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 void btrfs_update_iflags(struct inode *inode);
 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
+int btrfs_sync_flags(struct inode *inode, int, int);
 
 /* file.c */
 int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
diff -NurpP --minimal linux-2.6.32.17/fs/btrfs/disk-io.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/disk-io.c
--- linux-2.6.32.17/fs/btrfs/disk-io.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/disk-io.c	2009-12-03 20:04:56.000000000 +0100
@@ -1723,6 +1723,9 @@ struct btrfs_root *open_ctree(struct sup
 		goto fail_iput;
 	}
 
+	if (btrfs_test_opt(tree_root, TAGGED))
+		sb->s_flags |= MS_TAGGED;
+
 	features = btrfs_super_incompat_flags(disk_super) &
 		~BTRFS_FEATURE_INCOMPAT_SUPP;
 	if (features) {
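
The hunk above is what makes a btrfs superblock context aware: when the TAGGED mount option is set (the "tag" option string itself is parsed in fs/btrfs/super.c further down, and ext2/ext3 gain the same option later in the patch), MS_TAGGED is raised and per-inode tags are honoured. A minimal sketch (not part of the patch) doing the equivalent of "mount -o tag" via mount(2); the device and mount point are placeholders.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "tag" is parsed by btrfs_parse_options(), which sets the TAGGED
	   mount option and, via the hunk above, MS_TAGGED on the superblock */
	if (mount("/dev/sdb1", "/vservers", "btrfs", 0, "tag") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
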
diff -NurpP --minimal linux-2.6.32.17/fs/btrfs/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/inode.c
--- linux-2.6.32.17/fs/btrfs/inode.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/inode.c	2009-12-03 20:04:56.000000000 +0100
@@ -36,6 +36,8 @@
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/falloc.h>
+#include <linux/vs_tag.h>
+
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -2242,6 +2244,8 @@ static void btrfs_read_locked_inode(stru
 	int maybe_acls;
 	u64 alloc_group_block;
 	u32 rdev;
+	uid_t uid;
+	gid_t gid;
 	int ret;
 
 	path = btrfs_alloc_path();
@@ -2258,8 +2262,13 @@ static void btrfs_read_locked_inode(stru
 
 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
 	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
-	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
-	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
+
+	uid = btrfs_inode_uid(leaf, inode_item);
+	gid = btrfs_inode_gid(leaf, inode_item);
+	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+		btrfs_inode_tag(leaf, inode_item));
 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
 
 	tspec = btrfs_inode_atime(inode_item);
@@ -2341,8 +2350,15 @@ static void fill_inode_item(struct btrfs
 			    struct btrfs_inode_item *item,
 			    struct inode *inode)
 {
-	btrfs_set_inode_uid(leaf, item, inode->i_uid);
-	btrfs_set_inode_gid(leaf, item, inode->i_gid);
+	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
+
+	btrfs_set_inode_uid(leaf, item, uid);
+	btrfs_set_inode_gid(leaf, item, gid);
+#ifdef CONFIG_TAGGING_INTERN
+	btrfs_set_inode_tag(leaf, item, inode->i_tag);
+#endif
+
 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
@@ -4065,6 +4081,7 @@ static struct inode *btrfs_new_inode(str
 	} else
 		inode->i_gid = current_fsgid();
 
+	inode->i_tag = dx_current_fstag(root->fs_info->sb);
 	inode->i_mode = mode;
 	inode->i_ino = objectid;
 	inode_set_bytes(inode, 0);
@@ -5836,6 +5853,7 @@ static const struct inode_operations btr
 	.listxattr	= btrfs_listxattr,
 	.removexattr	= btrfs_removexattr,
 	.permission	= btrfs_permission,
+	.sync_flags	= btrfs_sync_flags,
 };
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
 	.lookup		= btrfs_lookup,
@@ -5911,6 +5929,7 @@ static const struct inode_operations btr
 	.permission	= btrfs_permission,
 	.fallocate	= btrfs_fallocate,
 	.fiemap		= btrfs_fiemap,
+	.sync_flags	= btrfs_sync_flags,
 };
 static const struct inode_operations btrfs_special_inode_operations = {
 	.getattr	= btrfs_getattr,
diff -NurpP --minimal linux-2.6.32.17/fs/btrfs/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/ioctl.c
--- linux-2.6.32.17/fs/btrfs/ioctl.c	2010-08-09 18:04:13.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/ioctl.c	2010-08-09 18:06:32.000000000 +0200
@@ -67,10 +67,13 @@ static unsigned int btrfs_flags_to_ioctl
 {
 	unsigned int iflags = 0;
 
-	if (flags & BTRFS_INODE_SYNC)
-		iflags |= FS_SYNC_FL;
 	if (flags & BTRFS_INODE_IMMUTABLE)
 		iflags |= FS_IMMUTABLE_FL;
+	if (flags & BTRFS_INODE_IXUNLINK)
+		iflags |= FS_IXUNLINK_FL;
+
+	if (flags & BTRFS_INODE_SYNC)
+		iflags |= FS_SYNC_FL;
 	if (flags & BTRFS_INODE_APPEND)
 		iflags |= FS_APPEND_FL;
 	if (flags & BTRFS_INODE_NODUMP)
@@ -80,28 +83,78 @@ static unsigned int btrfs_flags_to_ioctl
 	if (flags & BTRFS_INODE_DIRSYNC)
 		iflags |= FS_DIRSYNC_FL;
 
+	if (flags & BTRFS_INODE_BARRIER)
+		iflags |= FS_BARRIER_FL;
+	if (flags & BTRFS_INODE_COW)
+		iflags |= FS_COW_FL;
 	return iflags;
 }
 
 /*
- * Update inode->i_flags based on the btrfs internal flags.
+ * Update inode->i_(v)flags based on the btrfs internal flags.
  */
 void btrfs_update_iflags(struct inode *inode)
 {
 	struct btrfs_inode *ip = BTRFS_I(inode);
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
 
-	if (ip->flags & BTRFS_INODE_SYNC)
-		inode->i_flags |= S_SYNC;
 	if (ip->flags & BTRFS_INODE_IMMUTABLE)
 		inode->i_flags |= S_IMMUTABLE;
+	if (ip->flags & BTRFS_INODE_IXUNLINK)
+		inode->i_flags |= S_IXUNLINK;
+
+	if (ip->flags & BTRFS_INODE_SYNC)
+		inode->i_flags |= S_SYNC;
 	if (ip->flags & BTRFS_INODE_APPEND)
 		inode->i_flags |= S_APPEND;
 	if (ip->flags & BTRFS_INODE_NOATIME)
 		inode->i_flags |= S_NOATIME;
 	if (ip->flags & BTRFS_INODE_DIRSYNC)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (ip->flags & BTRFS_INODE_BARRIER)
+		inode->i_vflags |= V_BARRIER;
+	if (ip->flags & BTRFS_INODE_COW)
+		inode->i_vflags |= V_COW;
+}
+
+/*
+ * Update btrfs internal flags from inode->i_(v)flags.
+ */
+void btrfs_update_flags(struct inode *inode)
+{
+	struct btrfs_inode *ip = BTRFS_I(inode);
+
+	unsigned int flags = inode->i_flags;
+	unsigned int vflags = inode->i_vflags;
+
+	ip->flags &= ~(BTRFS_INODE_SYNC | BTRFS_INODE_APPEND |
+			BTRFS_INODE_IMMUTABLE | BTRFS_INODE_IXUNLINK |
+			BTRFS_INODE_NOATIME | BTRFS_INODE_DIRSYNC |
+			BTRFS_INODE_BARRIER | BTRFS_INODE_COW);
+
+	if (flags & S_IMMUTABLE)
+		ip->flags |= BTRFS_INODE_IMMUTABLE;
+	if (flags & S_IXUNLINK)
+		ip->flags |= BTRFS_INODE_IXUNLINK;
+
+	if (flags & S_SYNC)
+		ip->flags |= BTRFS_INODE_SYNC;
+	if (flags & S_APPEND)
+		ip->flags |= BTRFS_INODE_APPEND;
+	if (flags & S_NOATIME)
+		ip->flags |= BTRFS_INODE_NOATIME;
+	if (flags & S_DIRSYNC)
+		ip->flags |= BTRFS_INODE_DIRSYNC;
+
+	if (vflags & V_BARRIER)
+		ip->flags |= BTRFS_INODE_BARRIER;
+	if (vflags & V_COW)
+		ip->flags |= BTRFS_INODE_COW;
 }
 
 /*
@@ -119,7 +172,7 @@ void btrfs_inherit_iflags(struct inode *
 	flags = BTRFS_I(dir)->flags;
 
 	if (S_ISREG(inode->i_mode))
-		flags &= ~BTRFS_INODE_DIRSYNC;
+		flags &= ~(BTRFS_INODE_DIRSYNC | BTRFS_INODE_BARRIER);
 	else if (!S_ISDIR(inode->i_mode))
 		flags &= (BTRFS_INODE_NODUMP | BTRFS_INODE_NOATIME);
 
@@ -127,6 +180,30 @@ void btrfs_inherit_iflags(struct inode *
 	btrfs_update_iflags(inode);
 }
 
+int btrfs_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	struct btrfs_inode *ip = BTRFS_I(inode);
+	struct btrfs_root *root = ip->root;
+	struct btrfs_trans_handle *trans;
+	int ret;
+
+	trans = btrfs_join_transaction(root, 1);
+	BUG_ON(!trans);
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	btrfs_update_flags(inode);
+
+	ret = btrfs_update_inode(trans, root, inode);
+	BUG_ON(ret);
+
+	btrfs_update_iflags(inode);
+	inode->i_ctime = CURRENT_TIME;
+	btrfs_end_transaction(trans, root);
+
+	return 0;
+}
+
 static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
 {
 	struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
@@ -149,6 +226,7 @@ static int btrfs_ioctl_setflags(struct f
 	if (copy_from_user(&flags, arg, sizeof(flags)))
 		return -EFAULT;
 
+	/* maybe add FS_IXUNLINK_FL ? */
 	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
 		      FS_NOATIME_FL | FS_NODUMP_FL | \
 		      FS_SYNC_FL | FS_DIRSYNC_FL))
@@ -161,7 +239,8 @@ static int btrfs_ioctl_setflags(struct f
 
 	flags = btrfs_mask_flags(inode->i_mode, flags);
 	oldflags = btrfs_flags_to_ioctl(ip->flags);
-	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
+	if ((flags ^ oldflags) & (FS_APPEND_FL |
+		FS_IMMUTABLE_FL | FS_IXUNLINK_FL)) {
 		if (!capable(CAP_LINUX_IMMUTABLE)) {
 			ret = -EPERM;
 			goto out_unlock;
@@ -172,14 +251,19 @@ static int btrfs_ioctl_setflags(struct f
 	if (ret)
 		goto out_unlock;
 
-	if (flags & FS_SYNC_FL)
-		ip->flags |= BTRFS_INODE_SYNC;
-	else
-		ip->flags &= ~BTRFS_INODE_SYNC;
 	if (flags & FS_IMMUTABLE_FL)
 		ip->flags |= BTRFS_INODE_IMMUTABLE;
 	else
 		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
+	if (flags & FS_IXUNLINK_FL)
+		ip->flags |= BTRFS_INODE_IXUNLINK;
+	else
+		ip->flags &= ~BTRFS_INODE_IXUNLINK;
+
+	if (flags & FS_SYNC_FL)
+		ip->flags |= BTRFS_INODE_SYNC;
+	else
+		ip->flags &= ~BTRFS_INODE_SYNC;
 	if (flags & FS_APPEND_FL)
 		ip->flags |= BTRFS_INODE_APPEND;
 	else
diff -NurpP --minimal linux-2.6.32.17/fs/btrfs/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/super.c
--- linux-2.6.32.17/fs/btrfs/super.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/btrfs/super.c	2009-12-03 20:04:56.000000000 +0100
@@ -67,7 +67,7 @@ enum {
 	Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
 	Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
 	Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
-	Opt_discard, Opt_err,
+	Opt_tag, Opt_notag, Opt_tagid, Opt_discard, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -90,6 +90,9 @@ static match_table_t tokens = {
 	{Opt_flushoncommit, "flushoncommit"},
 	{Opt_ratio, "metadata_ratio=%d"},
 	{Opt_discard, "discard"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL},
 };
 
@@ -262,6 +265,22 @@ int btrfs_parse_options(struct btrfs_roo
 		case Opt_discard:
 			btrfs_set_opt(info->mount_opt, DISCARD);
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			printk(KERN_INFO "btrfs: use tagging\n");
+			btrfs_set_opt(info->mount_opt, TAGGED);
+			break;
+		case Opt_notag:
+			printk(KERN_INFO "btrfs: disabled tagging\n");
+			btrfs_clear_opt(info->mount_opt, TAGGED);
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			btrfs_set_opt(info->mount_opt, TAGGED);
+			break;
+#endif
 		default:
 			break;
 		}
@@ -575,6 +594,12 @@ static int btrfs_remount(struct super_bl
 	if (ret)
 		return -EINVAL;
 
+	if (btrfs_test_opt(root, TAGGED) && !(sb->s_flags & MS_TAGGED)) {
+		printk("btrfs: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		return -EINVAL;
+	}
+
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		return 0;
 
diff -NurpP --minimal linux-2.6.32.17/fs/char_dev.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/char_dev.c
--- linux-2.6.32.17/fs/char_dev.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/char_dev.c	2009-12-03 20:04:56.000000000 +0100
@@ -20,6 +20,8 @@
 #include <linux/cdev.h>
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
+#include <linux/vs_context.h>
+#include <linux/vs_device.h>
 
 #include "internal.h"
 
@@ -368,14 +370,21 @@ static int chrdev_open(struct inode *ino
 	struct cdev *p;
 	struct cdev *new = NULL;
 	int ret = 0;
+	dev_t mdev;
+
+	if (!vs_map_chrdev(inode->i_rdev, &mdev, DATTR_OPEN))
+		return -EPERM;
+	inode->i_mdev = mdev;
 
 	spin_lock(&cdev_lock);
 	p = inode->i_cdev;
 	if (!p) {
 		struct kobject *kobj;
 		int idx;
+
 		spin_unlock(&cdev_lock);
-		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
+
+		kobj = kobj_lookup(cdev_map, mdev, &idx);
 		if (!kobj)
 			return -ENXIO;
 		new = container_of(kobj, struct cdev, kobj);
diff -NurpP --minimal linux-2.6.32.17/fs/dcache.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/dcache.c
--- linux-2.6.32.17/fs/dcache.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/dcache.c	2009-12-03 20:04:56.000000000 +0100
@@ -33,6 +33,7 @@
 #include <linux/bootmem.h>
 #include <linux/fs_struct.h>
 #include <linux/hardirq.h>
+#include <linux/vs_limit.h>
 #include "internal.h"
 
 int sysctl_vfs_cache_pressure __read_mostly = 100;
@@ -230,6 +231,8 @@ repeat:
 		return;
 	}
 
+	vx_dentry_dec(dentry);
+
 	/*
 	 * AV: ->d_delete() is _NOT_ allowed to block now.
 	 */
@@ -321,6 +324,7 @@ static inline struct dentry * __dget_loc
 {
 	atomic_inc(&dentry->d_count);
 	dentry_lru_del_init(dentry);
+	vx_dentry_inc(dentry);
 	return dentry;
 }
 
@@ -919,6 +923,9 @@ struct dentry *d_alloc(struct dentry * p
 	struct dentry *dentry;
 	char *dname;
 
+	if (!vx_dentry_avail(1))
+		return NULL;
+
 	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
 	if (!dentry)
 		return NULL;
@@ -964,6 +971,7 @@ struct dentry *d_alloc(struct dentry * p
 	if (parent)
 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
 	dentry_stat.nr_dentry++;
+	vx_dentry_inc(dentry);
 	spin_unlock(&dcache_lock);
 
 	return dentry;
@@ -1407,6 +1415,7 @@ struct dentry * __d_lookup(struct dentry
 		}
 
 		atomic_inc(&dentry->d_count);
+		vx_dentry_inc(dentry);
 		found = dentry;
 		spin_unlock(&dentry->d_lock);
 		break;
diff -NurpP --minimal linux-2.6.32.17/fs/devpts/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/devpts/inode.c
--- linux-2.6.32.17/fs/devpts/inode.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/devpts/inode.c	2009-12-29 00:36:26.000000000 +0100
@@ -24,6 +24,7 @@
 #include <linux/parser.h>
 #include <linux/fsnotify.h>
 #include <linux/seq_file.h>
+#include <linux/vs_base.h>
 
 #define DEVPTS_DEFAULT_MODE 0600
 /*
@@ -35,6 +36,20 @@
 #define DEVPTS_DEFAULT_PTMX_MODE 0000
 #define PTMX_MINOR	2
 
+static int devpts_permission(struct inode *inode, int mask)
+{
+	int ret = -EACCES;
+
+	/* devpts is xid tagged */
+	if (vx_check((xid_t)inode->i_tag, VS_WATCH_P | VS_IDENT))
+		ret = generic_permission(inode, mask, NULL);
+	return ret;
+}
+
+static struct inode_operations devpts_file_inode_operations = {
+	.permission     = devpts_permission,
+};
+
 extern int pty_limit;			/* Config limit on Unix98 ptys */
 static DEFINE_MUTEX(allocated_ptys_lock);
 
@@ -262,6 +277,25 @@ static int devpts_show_options(struct se
 	return 0;
 }
 
+static int devpts_filter(struct dentry *de)
+{
+	/* devpts is xid tagged */
+	return vx_check((xid_t)de->d_inode->i_tag, VS_WATCH_P | VS_IDENT);
+}
+
+static int devpts_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+	return dcache_readdir_filter(filp, dirent, filldir, devpts_filter);
+}
+
+static struct file_operations devpts_dir_operations = {
+	.open		= dcache_dir_open,
+	.release	= dcache_dir_close,
+	.llseek		= dcache_dir_lseek,
+	.read		= generic_read_dir,
+	.readdir	= devpts_readdir,
+};
+
 static const struct super_operations devpts_sops = {
 	.statfs		= simple_statfs,
 	.remount_fs	= devpts_remount,
@@ -301,12 +335,15 @@ devpts_fill_super(struct super_block *s,
 	inode = new_inode(s);
 	if (!inode)
 		goto free_fsi;
+
 	inode->i_ino = 1;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
 	inode->i_op = &simple_dir_inode_operations;
-	inode->i_fop = &simple_dir_operations;
+	inode->i_fop = &devpts_dir_operations;
 	inode->i_nlink = 2;
+	/* devpts is xid tagged */
+	inode->i_tag = (tag_t)vx_current_xid();
 
 	s->s_root = d_alloc_root(inode);
 	if (s->s_root)
@@ -497,6 +534,9 @@ int devpts_pty_new(struct inode *ptmx_in
 	inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	init_special_inode(inode, S_IFCHR|opts->mode, device);
+	/* devpts is xid tagged */
+	inode->i_tag = (tag_t)vx_current_xid();
+	inode->i_op = &devpts_file_inode_operations;
 	inode->i_private = tty;
 	tty->driver_data = inode;
 
diff -NurpP --minimal linux-2.6.32.17/fs/exec.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/exec.c
--- linux-2.6.32.17/fs/exec.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/exec.c	2010-05-27 19:21:36.000000000 +0200
@@ -251,7 +251,9 @@ static int __bprm_mm_init(struct linux_b
 	if (err)
 		goto err;
 
-	mm->stack_vm = mm->total_vm = 1;
+	mm->total_vm = 0;
+	vx_vmpages_inc(mm);
+	mm->stack_vm = 1;
 	up_write(&mm->mmap_sem);
 	bprm->p = vma->vm_end - sizeof(void *);
 	return 0;
@@ -1494,7 +1496,7 @@ static int format_corename(char *corenam
 			/* UNIX time of coredump */
 			case 't': {
 				struct timeval tv;
-				do_gettimeofday(&tv);
+				vx_gettimeofday(&tv);
 				rc = snprintf(out_ptr, out_end - out_ptr,
 					      "%lu", tv.tv_sec);
 				if (rc > out_end - out_ptr)
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/balloc.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/balloc.c
--- linux-2.6.32.17/fs/ext2/balloc.c	2009-06-11 17:13:03.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/balloc.c	2009-12-03 20:04:56.000000000 +0100
@@ -701,7 +701,6 @@ ext2_try_to_allocate(struct super_block 
 			start = 0;
 		end = EXT2_BLOCKS_PER_GROUP(sb);
 	}
-
 	BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
 
 repeat:
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/ext2.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/ext2.h
--- linux-2.6.32.17/fs/ext2/ext2.h	2009-09-10 15:26:21.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/ext2.h	2009-12-03 20:04:56.000000000 +0100
@@ -131,6 +131,7 @@ extern int ext2_fiemap(struct inode *ino
 int __ext2_write_begin(struct file *file, struct address_space *mapping,
 		loff_t pos, unsigned len, unsigned flags,
 		struct page **pagep, void **fsdata);
+extern int ext2_sync_flags(struct inode *, int, int);
 
 /* ioctl.c */
 extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/file.c
--- linux-2.6.32.17/fs/ext2/file.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/file.c	2009-12-03 20:04:56.000000000 +0100
@@ -87,4 +87,5 @@ const struct inode_operations ext2_file_
 	.setattr	= ext2_setattr,
 	.check_acl	= ext2_check_acl,
 	.fiemap		= ext2_fiemap,
+	.sync_flags	= ext2_sync_flags,
 };
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/ialloc.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/ialloc.c
--- linux-2.6.32.17/fs/ext2/ialloc.c	2009-06-11 17:13:03.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/ialloc.c	2009-12-03 20:04:56.000000000 +0100
@@ -17,6 +17,7 @@
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/random.h>
+#include <linux/vs_tag.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
@@ -560,6 +561,7 @@ got:
 	} else
 		inode->i_gid = current_fsgid();
 	inode->i_mode = mode;
+	inode->i_tag = dx_current_fstag(sb);
 
 	inode->i_ino = ino;
 	inode->i_blocks = 0;
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/inode.c
--- linux-2.6.32.17/fs/ext2/inode.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/inode.c	2009-12-03 20:04:56.000000000 +0100
@@ -33,6 +33,7 @@
 #include <linux/mpage.h>
 #include <linux/fiemap.h>
 #include <linux/namei.h>
+#include <linux/vs_tag.h>
 #include "ext2.h"
 #include "acl.h"
 #include "xip.h"
@@ -1040,7 +1041,7 @@ void ext2_truncate(struct inode *inode)
 		return;
 	if (ext2_inode_is_fast_symlink(inode))
 		return;
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
 		return;
 
 	blocksize = inode->i_sb->s_blocksize;
@@ -1178,36 +1179,61 @@ void ext2_set_inode_flags(struct inode *
 {
 	unsigned int flags = EXT2_I(inode)->i_flags;
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+
+	if (flags & EXT2_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+	if (flags & EXT2_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
+
 	if (flags & EXT2_SYNC_FL)
 		inode->i_flags |= S_SYNC;
 	if (flags & EXT2_APPEND_FL)
 		inode->i_flags |= S_APPEND;
-	if (flags & EXT2_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
 	if (flags & EXT2_NOATIME_FL)
 		inode->i_flags |= S_NOATIME;
 	if (flags & EXT2_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & EXT2_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & EXT2_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
 void ext2_get_inode_flags(struct ext2_inode_info *ei)
 {
 	unsigned int flags = ei->vfs_inode.i_flags;
+	unsigned int vflags = ei->vfs_inode.i_vflags;
+
+	ei->i_flags &= ~(EXT2_SYNC_FL | EXT2_APPEND_FL |
+			EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL |
+			EXT2_NOATIME_FL | EXT2_DIRSYNC_FL |
+			EXT2_BARRIER_FL | EXT2_COW_FL);
+
+	if (flags & S_IMMUTABLE)
+		ei->i_flags |= EXT2_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		ei->i_flags |= EXT2_IXUNLINK_FL;
 
-	ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
-			EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
 	if (flags & S_SYNC)
 		ei->i_flags |= EXT2_SYNC_FL;
 	if (flags & S_APPEND)
 		ei->i_flags |= EXT2_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT2_IMMUTABLE_FL;
 	if (flags & S_NOATIME)
 		ei->i_flags |= EXT2_NOATIME_FL;
 	if (flags & S_DIRSYNC)
 		ei->i_flags |= EXT2_DIRSYNC_FL;
+
+	if (vflags & V_BARRIER)
+		ei->i_flags |= EXT2_BARRIER_FL;
+	if (vflags & V_COW)
+		ei->i_flags |= EXT2_COW_FL;
 }
 
 struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
@@ -1217,6 +1243,8 @@ struct inode *ext2_iget (struct super_bl
 	struct ext2_inode *raw_inode;
 	struct inode *inode;
 	long ret = -EIO;
+	uid_t uid;
+	gid_t gid;
 	int n;
 
 	inode = iget_locked(sb, ino);
@@ -1235,12 +1263,17 @@ struct inode *ext2_iget (struct super_bl
 	}
 
 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
-	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+	uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+	gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
 	if (!(test_opt (inode->i_sb, NO_UID32))) {
-		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
-		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+		uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+		gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
 	}
+	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+		le16_to_cpu(raw_inode->i_raw_tag));
+
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 	inode->i_size = le32_to_cpu(raw_inode->i_size);
 	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
@@ -1338,8 +1371,8 @@ int ext2_write_inode(struct inode *inode
 	struct ext2_inode_info *ei = EXT2_I(inode);
 	struct super_block *sb = inode->i_sb;
 	ino_t ino = inode->i_ino;
-	uid_t uid = inode->i_uid;
-	gid_t gid = inode->i_gid;
+	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
 	struct buffer_head * bh;
 	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
 	int n;
@@ -1375,6 +1408,9 @@ int ext2_write_inode(struct inode *inode
 		raw_inode->i_uid_high = 0;
 		raw_inode->i_gid_high = 0;
 	}
+#ifdef CONFIG_TAGGING_INTERN
+	raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
+#endif
 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 	raw_inode->i_size = cpu_to_le32(inode->i_size);
 	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
@@ -1456,7 +1492,8 @@ int ext2_setattr(struct dentry *dentry, 
 	if (error)
 		return error;
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
-	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
+	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
+	    (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
 		error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
 		if (error)
 			return error;
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/ioctl.c
--- linux-2.6.32.17/fs/ext2/ioctl.c	2009-09-10 15:26:21.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -17,6 +17,16 @@
 #include <asm/uaccess.h>
 
 
+int ext2_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ext2_get_inode_flags(EXT2_I(inode));
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return 0;
+}
+
 long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
@@ -51,6 +61,11 @@ long ext2_ioctl(struct file *filp, unsig
 
 		flags = ext2_mask_flags(inode->i_mode, flags);
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		mutex_lock(&inode->i_mutex);
 		/* Is it quota file? Do not allow user to mess with it */
 		if (IS_NOQUOTA(inode)) {
@@ -66,7 +81,9 @@ long ext2_ioctl(struct file *filp, unsig
 		 *
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
-		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
+		if ((oldflags & EXT2_IMMUTABLE_FL) ||
+			((flags ^ oldflags) & (EXT2_APPEND_FL |
+			EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE)) {
 				mutex_unlock(&inode->i_mutex);
 				ret = -EPERM;
@@ -74,7 +91,7 @@ long ext2_ioctl(struct file *filp, unsig
 			}
 		}
 
-		flags = flags & EXT2_FL_USER_MODIFIABLE;
+		flags &= EXT2_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
 		ei->i_flags = flags;
 		mutex_unlock(&inode->i_mutex);
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/namei.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/namei.c
--- linux-2.6.32.17/fs/ext2/namei.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/namei.c	2009-12-03 20:04:56.000000000 +0100
@@ -31,6 +31,7 @@
  */
 
 #include <linux/pagemap.h>
+#include <linux/vs_tag.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
@@ -74,6 +75,7 @@ static struct dentry *ext2_lookup(struct
 				return ERR_PTR(-EIO);
 			} else {
 				return ERR_CAST(inode);
 			}
 		}
+		dx_propagate_tag(nd, inode);
 	}
@@ -401,6 +403,7 @@ const struct inode_operations ext2_dir_i
 #endif
 	.setattr	= ext2_setattr,
 	.check_acl	= ext2_check_acl,
+	.sync_flags	= ext2_sync_flags,
 };
 
 const struct inode_operations ext2_special_inode_operations = {
diff -NurpP --minimal linux-2.6.32.17/fs/ext2/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/super.c
--- linux-2.6.32.17/fs/ext2/super.c	2009-09-10 15:26:21.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext2/super.c	2009-12-03 20:04:56.000000000 +0100
@@ -382,7 +382,8 @@ enum {
 	Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
 	Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
 	Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
-	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
+	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation,
+	Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -410,6 +411,9 @@ static const match_table_t tokens = {
 	{Opt_acl, "acl"},
 	{Opt_noacl, "noacl"},
 	{Opt_xip, "xip"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_grpquota, "grpquota"},
 	{Opt_ignore, "noquota"},
 	{Opt_quota, "quota"},
@@ -480,6 +484,20 @@ static int parse_options (char * options
 		case Opt_nouid32:
 			set_opt (sbi->s_mount_opt, NO_UID32);
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+		case Opt_notag:
+			clear_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
 		case Opt_nocheck:
 			clear_opt (sbi->s_mount_opt, CHECK);
 			break;
@@ -829,6 +847,8 @@ static int ext2_fill_super(struct super_
 	if (!parse_options ((char *) data, sbi))
 		goto failed_mount;
 
+	if (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
 		 MS_POSIXACL : 0);
@@ -1175,6 +1195,14 @@ static int ext2_remount (struct super_bl
 		goto restore_opts;
 	}
 
+	if ((sbi->s_mount_opt & EXT2_MOUNT_TAGGED) &&
+		!(sb->s_flags & MS_TAGGED)) {
+		printk("EXT2-fs: %s: tagging not permitted on remount.\n",
+		       sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 
diff -NurpP --minimal linux-2.6.32.17/fs/ext3/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/file.c
--- linux-2.6.32.17/fs/ext3/file.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/file.c	2009-12-03 20:04:56.000000000 +0100
@@ -80,5 +80,6 @@ const struct inode_operations ext3_file_
 #endif
 	.check_acl	= ext3_check_acl,
 	.fiemap		= ext3_fiemap,
+	.sync_flags	= ext3_sync_flags,
 };
 
diff -NurpP --minimal linux-2.6.32.17/fs/ext3/ialloc.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/ialloc.c
--- linux-2.6.32.17/fs/ext3/ialloc.c	2009-09-10 15:26:21.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/ialloc.c	2009-12-03 20:04:56.000000000 +0100
@@ -23,6 +23,7 @@
 #include <linux/buffer_head.h>
 #include <linux/random.h>
 #include <linux/bitops.h>
+#include <linux/vs_tag.h>
 
 #include <asm/byteorder.h>
 
@@ -548,6 +549,7 @@ got:
 	} else
 		inode->i_gid = current_fsgid();
 	inode->i_mode = mode;
+	inode->i_tag = dx_current_fstag(sb);
 
 	inode->i_ino = ino;
 	/* This is the optimal IO size (for stat), not the fs block size */
diff -NurpP --minimal linux-2.6.32.17/fs/ext3/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/inode.c
--- linux-2.6.32.17/fs/ext3/inode.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/inode.c	2009-12-29 00:36:26.000000000 +0100
@@ -38,6 +38,7 @@
 #include <linux/bio.h>
 #include <linux/fiemap.h>
 #include <linux/namei.h>
+#include <linux/vs_tag.h>
 #include "xattr.h"
 #include "acl.h"
 
@@ -2343,7 +2344,7 @@ static void ext3_free_branches(handle_t 
 
 int ext3_can_truncate(struct inode *inode)
 {
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
 		return 0;
 	if (S_ISREG(inode->i_mode))
 		return 1;
@@ -2728,36 +2729,60 @@ void ext3_set_inode_flags(struct inode *
 {
 	unsigned int flags = EXT3_I(inode)->i_flags;
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+	if (flags & EXT3_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+	if (flags & EXT3_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
+
 	if (flags & EXT3_SYNC_FL)
 		inode->i_flags |= S_SYNC;
 	if (flags & EXT3_APPEND_FL)
 		inode->i_flags |= S_APPEND;
-	if (flags & EXT3_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
 	if (flags & EXT3_NOATIME_FL)
 		inode->i_flags |= S_NOATIME;
 	if (flags & EXT3_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & EXT3_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & EXT3_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
 void ext3_get_inode_flags(struct ext3_inode_info *ei)
 {
 	unsigned int flags = ei->vfs_inode.i_flags;
+	unsigned int vflags = ei->vfs_inode.i_vflags;
+
+	ei->i_flags &= ~(EXT3_SYNC_FL | EXT3_APPEND_FL |
+			EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL |
+			EXT3_NOATIME_FL | EXT3_DIRSYNC_FL |
+			EXT3_BARRIER_FL | EXT3_COW_FL);
+
+	if (flags & S_IMMUTABLE)
+		ei->i_flags |= EXT3_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		ei->i_flags |= EXT3_IXUNLINK_FL;
 
-	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
-			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
 	if (flags & S_SYNC)
 		ei->i_flags |= EXT3_SYNC_FL;
 	if (flags & S_APPEND)
 		ei->i_flags |= EXT3_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT3_IMMUTABLE_FL;
 	if (flags & S_NOATIME)
 		ei->i_flags |= EXT3_NOATIME_FL;
 	if (flags & S_DIRSYNC)
 		ei->i_flags |= EXT3_DIRSYNC_FL;
+
+	if (vflags & V_BARRIER)
+		ei->i_flags |= EXT3_BARRIER_FL;
+	if (vflags & V_COW)
+		ei->i_flags |= EXT3_COW_FL;
 }
 
 struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
@@ -2771,6 +2796,8 @@ struct inode *ext3_iget(struct super_blo
 	transaction_t *transaction;
 	long ret;
 	int block;
+	uid_t uid;
+	gid_t gid;
 
 	inode = iget_locked(sb, ino);
 	if (!inode)
@@ -2787,12 +2814,17 @@ struct inode *ext3_iget(struct super_blo
 	bh = iloc.bh;
 	raw_inode = ext3_raw_inode(&iloc);
 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
-	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+	uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+	gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
 	if(!(test_opt (inode->i_sb, NO_UID32))) {
-		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
-		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+		uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+		gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
 	}
+	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+		le16_to_cpu(raw_inode->i_raw_tag));
+
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 	inode->i_size = le32_to_cpu(raw_inode->i_size);
 	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
@@ -2947,6 +2979,8 @@ static int ext3_do_update_inode(handle_t
 	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
 	struct ext3_inode_info *ei = EXT3_I(inode);
 	struct buffer_head *bh = iloc->bh;
+	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
 	int err = 0, rc, block;
 
 again:
@@ -2961,29 +2995,32 @@ again:
 	ext3_get_inode_flags(ei);
 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 	if(!(test_opt(inode->i_sb, NO_UID32))) {
-		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
-		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
+		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
+		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
 /*
  * Fix up interoperability with old kernels. Otherwise, old inodes get
  * re-used with the upper 16 bits of the uid/gid intact
  */
 		if(!ei->i_dtime) {
 			raw_inode->i_uid_high =
-				cpu_to_le16(high_16_bits(inode->i_uid));
+				cpu_to_le16(high_16_bits(uid));
 			raw_inode->i_gid_high =
-				cpu_to_le16(high_16_bits(inode->i_gid));
+				cpu_to_le16(high_16_bits(gid));
 		} else {
 			raw_inode->i_uid_high = 0;
 			raw_inode->i_gid_high = 0;
 		}
 	} else {
 		raw_inode->i_uid_low =
-			cpu_to_le16(fs_high2lowuid(inode->i_uid));
+			cpu_to_le16(fs_high2lowuid(uid));
 		raw_inode->i_gid_low =
-			cpu_to_le16(fs_high2lowgid(inode->i_gid));
+			cpu_to_le16(fs_high2lowgid(gid));
 		raw_inode->i_uid_high = 0;
 		raw_inode->i_gid_high = 0;
 	}
+#ifdef CONFIG_TAGGING_INTERN
+	raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
+#endif
 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
 	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
@@ -3141,7 +3178,8 @@ int ext3_setattr(struct dentry *dentry, 
 		return error;
 
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+		(ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
 		handle_t *handle;
 
 		/* (user+group)*(old+new) structure, inode write (sb,
@@ -3163,6 +3201,8 @@ int ext3_setattr(struct dentry *dentry, 
 			inode->i_uid = attr->ia_uid;
 		if (attr->ia_valid & ATTR_GID)
 			inode->i_gid = attr->ia_gid;
+		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+			inode->i_tag = attr->ia_tag;
 		error = ext3_mark_inode_dirty(handle, inode);
 		ext3_journal_stop(handle);
 	}
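
In the ext3_iget()/ext3_do_update_inode() hunks above, the raw on-disk uid/gid are first assembled into local variables and then split into inode->i_uid, i_gid and i_tag through the INOTAG_* helpers; on write-out the TAGINO_* helpers fold the tag back in, and with CONFIG_TAGGING_INTERN the tag is additionally persisted in the dedicated i_raw_tag field. The actual bit layout depends on which CONFIG_TAGGING_* scheme is configured; purely as an illustration of the idea (these are not the real INOTAG_*/TAGINO_* macros), a scheme keeping a 16-bit tag in the upper half of a 32-bit uid could look like:

	/* illustration only, not the actual vserver tagging macros */
	static inline unsigned int untagged_uid(unsigned int raw_uid)
	{
		return raw_uid & 0xffff;		/* low bits: real uid */
	}

	static inline unsigned int uid_to_tag(unsigned int raw_uid)
	{
		return (raw_uid >> 16) & 0xffff;	/* high bits: context tag */
	}

	static inline unsigned int fold_uid(unsigned int uid, unsigned int tag)
	{
		return (tag << 16) | (uid & 0xffff);
	}
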
diff -NurpP --minimal linux-2.6.32.17/fs/ext3/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/ioctl.c
--- linux-2.6.32.17/fs/ext3/ioctl.c	2009-06-11 17:13:03.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -8,6 +8,7 @@
  */
 
 #include <linux/fs.h>
+#include <linux/mount.h>
 #include <linux/jbd.h>
 #include <linux/capability.h>
 #include <linux/ext3_fs.h>
@@ -17,6 +18,34 @@
 #include <linux/compat.h>
 #include <asm/uaccess.h>
 
+
+int ext3_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	handle_t *handle = NULL;
+	struct ext3_iloc iloc;
+	int err;
+
+	handle = ext3_journal_start(inode, 1);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_SYNC(inode))
+		handle->h_sync = 1;
+	err = ext3_reserve_inode_write(handle, inode, &iloc);
+	if (err)
+		goto flags_err;
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ext3_get_inode_flags(EXT3_I(inode));
+	inode->i_ctime = CURRENT_TIME_SEC;
+
+	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+flags_err:
+	ext3_journal_stop(handle);
+	return err;
+}
+
 long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
@@ -50,6 +79,11 @@ long ext3_ioctl(struct file *filp, unsig
 
 		flags = ext3_mask_flags(inode->i_mode, flags);
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		mutex_lock(&inode->i_mutex);
 
 		/* Is it quota file? Do not allow user to mess with it */
@@ -68,7 +102,9 @@ long ext3_ioctl(struct file *filp, unsig
 		 *
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
-		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
+		if ((oldflags & EXT3_IMMUTABLE_FL) ||
+			((flags ^ oldflags) & (EXT3_APPEND_FL |
+			EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE))
 				goto flags_out;
 		}
@@ -93,7 +129,7 @@ long ext3_ioctl(struct file *filp, unsig
 		if (err)
 			goto flags_err;
 
-		flags = flags & EXT3_FL_USER_MODIFIABLE;
+		flags &= EXT3_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
 		ei->i_flags = flags;
 
diff -NurpP --minimal linux-2.6.32.17/fs/ext3/namei.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/namei.c
--- linux-2.6.32.17/fs/ext3/namei.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/namei.c	2009-12-03 20:04:56.000000000 +0100
@@ -36,6 +36,7 @@
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include <linux/bio.h>
+#include <linux/vs_tag.h>
 
 #include "namei.h"
 #include "xattr.h"
@@ -912,6 +913,7 @@ restart:
 				if (bh)
 					ll_rw_block(READ_META, 1, &bh);
 			}
+		dx_propagate_tag(nd, inode);
 		}
 		if ((bh = bh_use[ra_ptr++]) == NULL)
 			goto next;
@@ -2446,6 +2448,7 @@ const struct inode_operations ext3_dir_i
 	.removexattr	= generic_removexattr,
 #endif
 	.check_acl	= ext3_check_acl,
+	.sync_flags	= ext3_sync_flags,
 };
 
 const struct inode_operations ext3_special_inode_operations = {
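
ext3_sync_flags() from fs/ext3/ioctl.c above is wired up here as the new ->sync_flags inode operation (ext4, gfs2 and jfs get the same hook below); it takes a pair of i_flags/i_vflags values, propagates them into the filesystem-specific flag word and writes the inode back through the journal. A hypothetical caller (the name set_inode_attr() and the fallback branch are illustration only; the signature is the one declared by this patch) might look like:

	/* hypothetical caller of the new ->sync_flags inode operation */
	static int set_inode_attr(struct inode *inode, int flags, int vflags)
	{
		if (inode->i_op && inode->i_op->sync_flags)
			return inode->i_op->sync_flags(inode, flags, vflags);

		/* no filesystem support: update the in-memory copy only */
		inode->i_flags = flags;
		inode->i_vflags = vflags;
		mark_inode_dirty(inode);
		return 0;
	}
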
diff -NurpP --minimal linux-2.6.32.17/fs/ext3/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/super.c
--- linux-2.6.32.17/fs/ext3/super.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext3/super.c	2010-04-28 10:19:32.000000000 +0200
@@ -789,7 +789,7 @@ enum {
 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
-	Opt_grpquota
+	Opt_grpquota, Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -842,6 +842,9 @@ static const match_table_t tokens = {
 	{Opt_usrquota, "usrquota"},
 	{Opt_barrier, "barrier=%u"},
 	{Opt_resize, "resize"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL},
 };
 
@@ -934,6 +937,20 @@ static int parse_options (char *options,
 		case Opt_nouid32:
 			set_opt (sbi->s_mount_opt, NO_UID32);
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+		case Opt_notag:
+			clear_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
 		case Opt_nocheck:
 			clear_opt (sbi->s_mount_opt, CHECK);
 			break;
@@ -1658,6 +1675,9 @@ static int ext3_fill_super (struct super
 			    NULL, 0))
 		goto failed_mount;
 
+	if (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 
@@ -2527,6 +2547,14 @@ static int ext3_remount (struct super_bl
 	if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
 		ext3_abort(sb, __func__, "Abort forced by user");
 
+	if ((sbi->s_mount_opt & EXT3_MOUNT_TAGGED) &&
+		!(sb->s_flags & MS_TAGGED)) {
+		printk(KERN_ERR "EXT3-fs: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 
diff -NurpP --minimal linux-2.6.32.17/fs/ext4/ext4.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/ext4.h
--- linux-2.6.32.17/fs/ext4/ext4.h	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/ext4.h	2010-08-09 18:08:03.000000000 +0200
@@ -289,8 +289,12 @@ struct flex_groups {
 #define EXT4_EXTENTS_FL			0x00080000 /* Inode uses extents */
 #define EXT4_EA_INODE_FL	        0x00200000 /* Inode used for large EA */
 #define EXT4_EOFBLOCKS_FL		0x00400000 /* Blocks allocated beyond EOF */
+#define EXT4_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
 #define EXT4_RESERVED_FL		0x80000000 /* reserved for ext4 lib */
 
+#define EXT4_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define EXT4_COW_FL			0x20000000 /* Copy on Write marker */
+
 #define EXT4_FL_USER_VISIBLE		0x004BDFFF /* User visible flags */
 #define EXT4_FL_USER_MODIFIABLE		0x004B80FF /* User modifiable flags */
 
@@ -552,7 +556,8 @@ struct ext4_inode {
 			__le16	l_i_file_acl_high;
 			__le16	l_i_uid_high;	/* these 2 fields */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
+			__le16	l_i_tag;	/* Context Tag */
+			__u16	l_i_reserved2;
 		} linux2;
 		struct {
 			__le16	h_i_reserved1;	/* Obsoleted fragment number/size which are removed in ext4 */
@@ -666,6 +671,7 @@ do {									       \
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_reserved2	osd2.linux2.l_i_reserved2
 
 #elif defined(__GNU__)
@@ -840,6 +846,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_QUOTA		0x80000 /* Some quota option set */
 #define EXT4_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
 #define EXT4_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
+#define EXT4_MOUNT_TAGGED		0x400000 /* Enable Context Tags */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
 #define EXT4_MOUNT_I_VERSION            0x2000000 /* i_version support */
@@ -1865,6 +1872,7 @@ extern int ext4_get_blocks(handle_t *han
 			   struct buffer_head *bh, int flags);
 extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			__u64 start, __u64 len);
+extern int ext4_sync_flags(struct inode *, int, int);
 /* move_extent.c */
 extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 			     __u64 start_orig, __u64 start_donor,
diff -NurpP --minimal linux-2.6.32.17/fs/ext4/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/file.c
--- linux-2.6.32.17/fs/ext4/file.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/file.c	2010-08-09 18:06:32.000000000 +0200
@@ -161,5 +161,6 @@ const struct inode_operations ext4_file_
 	.check_acl	= ext4_check_acl,
 	.fallocate	= ext4_fallocate,
 	.fiemap		= ext4_fiemap,
+	.sync_flags	= ext4_sync_flags,
 };
 
diff -NurpP --minimal linux-2.6.32.17/fs/ext4/ialloc.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/ialloc.c
--- linux-2.6.32.17/fs/ext4/ialloc.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/ialloc.c	2010-08-09 18:06:32.000000000 +0200
@@ -22,6 +22,7 @@
 #include <linux/random.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/vs_tag.h>
 #include <asm/byteorder.h>
 
 #include "ext4.h"
@@ -988,6 +989,7 @@ got:
 	} else
 		inode->i_gid = current_fsgid();
 	inode->i_mode = mode;
+	inode->i_tag = dx_current_fstag(sb);
 
 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
 	/* This is the optimal IO size (for stat), not the fs block size */
diff -NurpP --minimal linux-2.6.32.17/fs/ext4/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/inode.c
--- linux-2.6.32.17/fs/ext4/inode.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/inode.c	2010-08-09 18:06:32.000000000 +0200
@@ -38,6 +38,7 @@
 #include <linux/uio.h>
 #include <linux/bio.h>
 #include <linux/workqueue.h>
+#include <linux/vs_tag.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -4446,7 +4447,7 @@ static void ext4_free_branches(handle_t 
 
 int ext4_can_truncate(struct inode *inode)
 {
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
 		return 0;
 	if (S_ISREG(inode->i_mode))
 		return 1;
@@ -4799,36 +4800,60 @@ void ext4_set_inode_flags(struct inode *
 {
 	unsigned int flags = EXT4_I(inode)->i_flags;
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+	if (flags & EXT4_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+	if (flags & EXT4_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
+
 	if (flags & EXT4_SYNC_FL)
 		inode->i_flags |= S_SYNC;
 	if (flags & EXT4_APPEND_FL)
 		inode->i_flags |= S_APPEND;
-	if (flags & EXT4_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
 	if (flags & EXT4_NOATIME_FL)
 		inode->i_flags |= S_NOATIME;
 	if (flags & EXT4_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & EXT4_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & EXT4_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
 void ext4_get_inode_flags(struct ext4_inode_info *ei)
 {
 	unsigned int flags = ei->vfs_inode.i_flags;
+	unsigned int vflags = ei->vfs_inode.i_vflags;
+
+	ei->i_flags &= ~(EXT4_SYNC_FL | EXT4_APPEND_FL |
+			EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL |
+			EXT4_NOATIME_FL | EXT4_DIRSYNC_FL |
+			EXT4_BARRIER_FL | EXT4_COW_FL);
+
+	if (flags & S_IMMUTABLE)
+		ei->i_flags |= EXT4_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		ei->i_flags |= EXT4_IXUNLINK_FL;
 
-	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
-			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
 	if (flags & S_SYNC)
 		ei->i_flags |= EXT4_SYNC_FL;
 	if (flags & S_APPEND)
 		ei->i_flags |= EXT4_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT4_IMMUTABLE_FL;
 	if (flags & S_NOATIME)
 		ei->i_flags |= EXT4_NOATIME_FL;
 	if (flags & S_DIRSYNC)
 		ei->i_flags |= EXT4_DIRSYNC_FL;
+
+	if (vflags & V_BARRIER)
+		ei->i_flags |= EXT4_BARRIER_FL;
+	if (vflags & V_COW)
+		ei->i_flags |= EXT4_COW_FL;
 }
 
 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
@@ -4863,6 +4888,8 @@ struct inode *ext4_iget(struct super_blo
 	journal_t *journal = EXT4_SB(sb)->s_journal;
 	long ret;
 	int block;
+	uid_t uid;
+	gid_t gid;
 
 	inode = iget_locked(sb, ino);
 	if (!inode)
@@ -4878,12 +4905,16 @@ struct inode *ext4_iget(struct super_blo
 		goto bad_inode;
 	raw_inode = ext4_raw_inode(&iloc);
 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
-	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+	uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+	gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
 	if (!(test_opt(inode->i_sb, NO_UID32))) {
-		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
-		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+		uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+		gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
 	}
+	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+		le16_to_cpu(raw_inode->i_raw_tag));
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 
 	ei->i_state_flags = 0;
@@ -5105,6 +5136,8 @@ static int ext4_do_update_inode(handle_t
 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct buffer_head *bh = iloc->bh;
+	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
 	int err = 0, rc, block;
 
 	/* For fields not not tracking in the in-memory inode,
@@ -5115,29 +5148,32 @@ static int ext4_do_update_inode(handle_t
 	ext4_get_inode_flags(ei);
 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
 	if (!(test_opt(inode->i_sb, NO_UID32))) {
-		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
-		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
+		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
+		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
 /*
  * Fix up interoperability with old kernels. Otherwise, old inodes get
  * re-used with the upper 16 bits of the uid/gid intact
  */
 		if (!ei->i_dtime) {
 			raw_inode->i_uid_high =
-				cpu_to_le16(high_16_bits(inode->i_uid));
+				cpu_to_le16(high_16_bits(uid));
 			raw_inode->i_gid_high =
-				cpu_to_le16(high_16_bits(inode->i_gid));
+				cpu_to_le16(high_16_bits(gid));
 		} else {
 			raw_inode->i_uid_high = 0;
 			raw_inode->i_gid_high = 0;
 		}
 	} else {
 		raw_inode->i_uid_low =
-			cpu_to_le16(fs_high2lowuid(inode->i_uid));
+			cpu_to_le16(fs_high2lowuid(uid));
 		raw_inode->i_gid_low =
-			cpu_to_le16(fs_high2lowgid(inode->i_gid));
+			cpu_to_le16(fs_high2lowgid(gid));
 		raw_inode->i_uid_high = 0;
 		raw_inode->i_gid_high = 0;
 	}
+#ifdef CONFIG_TAGGING_INTERN
+	raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
+#endif
 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 
 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
@@ -5323,7 +5359,8 @@ int ext4_setattr(struct dentry *dentry, 
 		return error;
 
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+		(ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
 		handle_t *handle;
 
 		/* (user+group)*(old+new) structure, inode write (sb,
@@ -5345,6 +5382,8 @@ int ext4_setattr(struct dentry *dentry, 
 			inode->i_uid = attr->ia_uid;
 		if (attr->ia_valid & ATTR_GID)
 			inode->i_gid = attr->ia_gid;
+		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+			inode->i_tag = attr->ia_tag;
 		error = ext4_mark_inode_dirty(handle, inode);
 		ext4_journal_stop(handle);
 	}
diff -NurpP --minimal linux-2.6.32.17/fs/ext4/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/ioctl.c
--- linux-2.6.32.17/fs/ext4/ioctl.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/ioctl.c	2010-08-09 18:06:32.000000000 +0200
@@ -14,10 +14,39 @@
 #include <linux/compat.h>
 #include <linux/mount.h>
 #include <linux/file.h>
+#include <linux/vs_tag.h>
 #include <asm/uaccess.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 
+
+int ext4_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	handle_t *handle = NULL;
+	struct ext4_iloc iloc;
+	int err;
+
+	handle = ext4_journal_start(inode, 1);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_SYNC(inode))
+		ext4_handle_sync(handle);
+	err = ext4_reserve_inode_write(handle, inode, &iloc);
+	if (err)
+		goto flags_err;
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ext4_get_inode_flags(EXT4_I(inode));
+	inode->i_ctime = ext4_current_time(inode);
+
+	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+flags_err:
+	ext4_journal_stop(handle);
+	return err;
+}
+
 long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
@@ -50,6 +79,11 @@ long ext4_ioctl(struct file *filp, unsig
 
 		flags = ext4_mask_flags(inode->i_mode, flags);
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		err = -EPERM;
 		mutex_lock(&inode->i_mutex);
 		/* Is it quota file? Do not allow user to mess with it */
@@ -67,7 +101,9 @@ long ext4_ioctl(struct file *filp, unsig
 		 *
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
-		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
+		if ((oldflags & EXT4_IMMUTABLE_FL) ||
+			((flags ^ oldflags) & (EXT4_APPEND_FL |
+			EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE))
 				goto flags_out;
 		}
diff -NurpP --minimal linux-2.6.32.17/fs/ext4/namei.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/namei.c
--- linux-2.6.32.17/fs/ext4/namei.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/namei.c	2010-08-09 18:06:32.000000000 +0200
@@ -34,6 +34,7 @@
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include <linux/bio.h>
+#include <linux/vs_tag.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
 
@@ -941,6 +942,7 @@ restart:
 				if (bh)
 					ll_rw_block(READ_META, 1, &bh);
 			}
+		dx_propagate_tag(nd, inode);
 		}
 		if ((bh = bh_use[ra_ptr++]) == NULL)
 			goto next;
@@ -2543,6 +2545,7 @@ const struct inode_operations ext4_dir_i
 #endif
 	.check_acl	= ext4_check_acl,
 	.fiemap         = ext4_fiemap,
+	.sync_flags	= ext4_sync_flags,
 };
 
 const struct inode_operations ext4_special_inode_operations = {
diff -NurpP --minimal linux-2.6.32.17/fs/ext4/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/super.c
--- linux-2.6.32.17/fs/ext4/super.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ext4/super.c	2010-08-09 18:06:32.000000000 +0200
@@ -1100,6 +1100,7 @@ enum {
 	Opt_block_validity, Opt_noblock_validity,
 	Opt_inode_readahead_blks, Opt_journal_ioprio,
 	Opt_discard, Opt_nodiscard,
+	Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -1167,6 +1168,9 @@ static const match_table_t tokens = {
 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
 	{Opt_discard, "discard"},
 	{Opt_nodiscard, "nodiscard"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL},
 };
 
@@ -1264,6 +1268,20 @@ static int parse_options(char *options, 
 		case Opt_nouid32:
 			set_opt(sbi->s_mount_opt, NO_UID32);
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			set_opt(sbi->s_mount_opt, TAGGED);
+			break;
+		case Opt_notag:
+			clear_opt(sbi->s_mount_opt, TAGGED);
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			set_opt(sbi->s_mount_opt, TAGGED);
+			break;
+#endif
 		case Opt_debug:
 			set_opt(sbi->s_mount_opt, DEBUG);
 			break;
@@ -2464,6 +2482,9 @@ static int ext4_fill_super(struct super_
 			   &journal_ioprio, NULL, 0))
 		goto failed_mount;
 
+	if (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 
@@ -3515,6 +3536,14 @@ static int ext4_remount(struct super_blo
 	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
 		ext4_abort(sb, __func__, "Abort forced by user");
 
+	if ((sbi->s_mount_opt & EXT4_MOUNT_TAGGED) &&
+		!(sb->s_flags & MS_TAGGED)) {
+		printk(KERN_ERR "EXT4-fs: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
 
diff -NurpP --minimal linux-2.6.32.17/fs/fcntl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/fcntl.c
--- linux-2.6.32.17/fs/fcntl.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/fcntl.c	2010-02-24 12:32:54.000000000 +0100
@@ -19,6 +19,7 @@
 #include <linux/signal.h>
 #include <linux/rcupdate.h>
 #include <linux/pid_namespace.h>
+#include <linux/vs_limit.h>
 
 #include <asm/poll.h>
 #include <asm/siginfo.h>
@@ -102,6 +103,8 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldf
 
 	if (tofree)
 		filp_close(tofree, files);
+	else
+		vx_openfd_inc(newfd);	/* fd was unused */
 
 	return newfd;
 
@@ -426,6 +429,10 @@ SYSCALL_DEFINE3(fcntl, unsigned int, fd,
 	filp = fget(fd);
 	if (!filp)
 		goto out;
+	if (!vx_files_avail(1)) {
+		fput(filp);
+		goto out;
+	}
 
 	err = security_file_fcntl(filp, cmd, arg);
 	if (err) {
diff -NurpP --minimal linux-2.6.32.17/fs/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/file.c
--- linux-2.6.32.17/fs/file.c	2009-12-03 20:02:51.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/file.c	2009-12-03 20:04:56.000000000 +0100
@@ -20,6 +20,7 @@
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/workqueue.h>
+#include <linux/vs_limit.h>
 
 struct fdtable_defer {
 	spinlock_t lock;
@@ -368,6 +369,8 @@ struct files_struct *dup_fd(struct files
 		struct file *f = *old_fds++;
 		if (f) {
 			get_file(f);
+			/* TODO: sum it first for check and performance */
+			vx_openfd_inc(open_files - i);
 		} else {
 			/*
 			 * The fd may be claimed in the fd bitmap but not yet
@@ -476,6 +479,7 @@ repeat:
 	else
 		FD_CLR(fd, fdt->close_on_exec);
 	error = fd;
+	vx_openfd_inc(fd);
 #if 1
 	/* Sanity check */
 	if (rcu_dereference(fdt->fd[fd]) != NULL) {
diff -NurpP --minimal linux-2.6.32.17/fs/file_table.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/file_table.c
--- linux-2.6.32.17/fs/file_table.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/file_table.c	2010-03-18 16:53:06.000000000 +0100
@@ -22,6 +22,8 @@
 #include <linux/fsnotify.h>
 #include <linux/sysctl.h>
 #include <linux/percpu_counter.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
 
 #include <asm/atomic.h>
 
@@ -131,6 +133,8 @@ struct file *get_empty_filp(void)
 	spin_lock_init(&f->f_lock);
 	eventpoll_init_file(f);
 	/* f->f_version: 0 */
+	f->f_xid = vx_current_xid();
+	vx_files_inc(f);
 	return f;
 
 over:
@@ -285,6 +289,8 @@ void __fput(struct file *file)
 		cdev_put(inode->i_cdev);
 	fops_put(file->f_op);
 	put_pid(file->f_owner.pid);
+	vx_files_dec(file);
+	file->f_xid = 0;
 	file_kill(file);
 	if (file->f_mode & FMODE_WRITE)
 		drop_file_write_access(file);
@@ -352,6 +358,8 @@ void put_filp(struct file *file)
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		security_file_free(file);
+		vx_files_dec(file);
+		file->f_xid = 0;
 		file_kill(file);
 		file_free(file);
 	}
diff -NurpP --minimal linux-2.6.32.17/fs/fs_struct.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/fs_struct.c
--- linux-2.6.32.17/fs/fs_struct.c	2009-06-11 17:13:04.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/fs_struct.c	2009-12-03 20:04:56.000000000 +0100
@@ -4,6 +4,7 @@
 #include <linux/path.h>
 #include <linux/slab.h>
 #include <linux/fs_struct.h>
+#include <linux/vserver/global.h>
 
 /*
  * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
@@ -77,6 +78,7 @@ void free_fs_struct(struct fs_struct *fs
 {
 	path_put(&fs->root);
 	path_put(&fs->pwd);
+	atomic_dec(&vs_global_fs);
 	kmem_cache_free(fs_cachep, fs);
 }
 
@@ -112,6 +114,7 @@ struct fs_struct *copy_fs_struct(struct 
 		fs->pwd = old->pwd;
 		path_get(&old->pwd);
 		read_unlock(&old->lock);
+		atomic_inc(&vs_global_fs);
 	}
 	return fs;
 }
diff -NurpP --minimal linux-2.6.32.17/fs/gfs2/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/gfs2/file.c
--- linux-2.6.32.17/fs/gfs2/file.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/gfs2/file.c	2010-07-08 02:22:31.000000000 +0200
@@ -132,6 +132,9 @@ static const u32 fsflags_to_gfs2[32] = {
 	[7] = GFS2_DIF_NOATIME,
 	[12] = GFS2_DIF_EXHASH,
 	[14] = GFS2_DIF_INHERIT_JDATA,
+	[27] = GFS2_DIF_IXUNLINK,
+	[26] = GFS2_DIF_BARRIER,
+	[29] = GFS2_DIF_COW,
 };
 
 static const u32 gfs2_to_fsflags[32] = {
@@ -141,6 +144,9 @@ static const u32 gfs2_to_fsflags[32] = {
 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
 	[gfs2fl_ExHash] = FS_INDEX_FL,
 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
+	[gfs2fl_IXUnlink] = FS_IXUNLINK_FL,
+	[gfs2fl_Barrier] = FS_BARRIER_FL,
+	[gfs2fl_Cow] = FS_COW_FL,
 };
 
 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
@@ -171,10 +177,16 @@ void gfs2_set_inode_flags(struct inode *
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int flags = inode->i_flags;
+	unsigned int vflags = inode->i_vflags;
+
+	flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
 
-	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
 		flags |= S_IMMUTABLE;
+	if (ip->i_diskflags & GFS2_DIF_IXUNLINK)
+		flags |= S_IXUNLINK;
+
 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
 		flags |= S_APPEND;
 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
@@ -182,6 +194,43 @@ void gfs2_set_inode_flags(struct inode *
 	if (ip->i_diskflags & GFS2_DIF_SYNC)
 		flags |= S_SYNC;
 	inode->i_flags = flags;
+
+	vflags &= ~(V_BARRIER | V_COW);
+
+	if (ip->i_diskflags & GFS2_DIF_BARRIER)
+		vflags |= V_BARRIER;
+	if (ip->i_diskflags & GFS2_DIF_COW)
+		vflags |= V_COW;
+	inode->i_vflags = vflags;
+}
+
+void gfs2_get_inode_flags(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	unsigned int flags = inode->i_flags;
+	unsigned int vflags = inode->i_vflags;
+
+	ip->i_diskflags &= ~(GFS2_DIF_APPENDONLY |
+			GFS2_DIF_NOATIME | GFS2_DIF_SYNC |
+			GFS2_DIF_IMMUTABLE | GFS2_DIF_IXUNLINK |
+			GFS2_DIF_BARRIER | GFS2_DIF_COW);
+
+	if (flags & S_IMMUTABLE)
+		ip->i_diskflags |= GFS2_DIF_IMMUTABLE;
+	if (flags & S_IXUNLINK)
+		ip->i_diskflags |= GFS2_DIF_IXUNLINK;
+
+	if (flags & S_APPEND)
+		ip->i_diskflags |= GFS2_DIF_APPENDONLY;
+	if (flags & S_NOATIME)
+		ip->i_diskflags |= GFS2_DIF_NOATIME;
+	if (flags & S_SYNC)
+		ip->i_diskflags |= GFS2_DIF_SYNC;
+
+	if (vflags & V_BARRIER)
+		ip->i_diskflags |= GFS2_DIF_BARRIER;
+	if (vflags & V_COW)
+		ip->i_diskflags |= GFS2_DIF_COW;
 }
 
 /* Flags that can be set by user space */
@@ -293,6 +342,37 @@ static int gfs2_set_flags(struct file *f
 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
 }
 
+int gfs2_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct buffer_head *bh;
+	struct gfs2_holder gh;
+	int error;
+
+	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	if (error)
+		return error;
+	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+	if (error)
+		goto out;
+	error = gfs2_meta_inode_buffer(ip, &bh);
+	if (error)
+		goto out_trans_end;
+	gfs2_trans_add_bh(ip->i_gl, bh, 1);
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	gfs2_get_inode_flags(inode);
+	gfs2_dinode_out(ip, bh->b_data);
+	brelse(bh);
+	gfs2_set_aops(inode);
+out_trans_end:
+	gfs2_trans_end(sdp);
+out:
+	gfs2_glock_dq_uninit(&gh);
+	return error;
+}
+
 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	switch(cmd) {
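
The new slots in fsflags_to_gfs2[]/gfs2_to_fsflags[] above are indexed by the bit number of the generic FS_*_FL flag they translate (the existing [7]/[12]/[14] entries follow the same rule), so [26], [27] and [29] presumably correspond to barrier, ixunlink and cow flags defined as 0x04000000, 0x08000000 and 0x20000000 elsewhere in this patch, the same values used for the EXT4_*_FL and JFS_*_FL additions. A trivial standalone check of that correspondence:

	/* sanity check of the assumed bit positions (illustration only) */
	#include <assert.h>

	int main(void)
	{
		assert(0x04000000 == 1 << 26);	/* barrier  */
		assert(0x08000000 == 1 << 27);	/* ixunlink */
		assert(0x20000000 == 1 << 29);	/* cow      */
		return 0;
	}
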
diff -NurpP --minimal linux-2.6.32.17/fs/gfs2/inode.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/gfs2/inode.h
--- linux-2.6.32.17/fs/gfs2/inode.h	2009-09-10 15:26:22.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/gfs2/inode.h	2009-12-03 20:04:56.000000000 +0100
@@ -109,6 +109,7 @@ extern const struct file_operations gfs2
 extern const struct file_operations gfs2_dir_fops_nolock;
 
 extern void gfs2_set_inode_flags(struct inode *inode);
+extern int gfs2_sync_flags(struct inode *inode, int flags, int vflags);
  
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
 extern const struct file_operations gfs2_file_fops;
diff -NurpP --minimal linux-2.6.32.17/fs/gfs2/ops_inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/gfs2/ops_inode.c
--- linux-2.6.32.17/fs/gfs2/ops_inode.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/gfs2/ops_inode.c	2009-12-03 20:04:56.000000000 +0100
@@ -1400,6 +1400,7 @@ const struct inode_operations gfs2_file_
 	.listxattr = gfs2_listxattr,
 	.removexattr = gfs2_removexattr,
 	.fiemap = gfs2_fiemap,
+	.sync_flags = gfs2_sync_flags,
 };
 
 const struct inode_operations gfs2_dir_iops = {
@@ -1420,6 +1421,7 @@ const struct inode_operations gfs2_dir_i
 	.listxattr = gfs2_listxattr,
 	.removexattr = gfs2_removexattr,
 	.fiemap = gfs2_fiemap,
+	.sync_flags = gfs2_sync_flags,
 };
 
 const struct inode_operations gfs2_symlink_iops = {
diff -NurpP --minimal linux-2.6.32.17/fs/hfsplus/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/hfsplus/ioctl.c
--- linux-2.6.32.17/fs/hfsplus/ioctl.c	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/hfsplus/ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -17,6 +17,7 @@
 #include <linux/mount.h>
 #include <linux/sched.h>
 #include <linux/xattr.h>
+#include <linux/mount.h>
 #include <asm/uaccess.h>
 #include "hfsplus_fs.h"
 
diff -NurpP --minimal linux-2.6.32.17/fs/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/inode.c
--- linux-2.6.32.17/fs/inode.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/inode.c	2009-12-03 20:04:56.000000000 +0100
@@ -133,6 +133,9 @@ int inode_init_always(struct super_block
 	struct address_space *const mapping = &inode->i_data;
 
 	inode->i_sb = sb;
+
+	/* essential because of inode slab reuse */
+	inode->i_tag = 0;
 	inode->i_blkbits = sb->s_blocksize_bits;
 	inode->i_flags = 0;
 	atomic_set(&inode->i_count, 1);
@@ -153,6 +156,7 @@ int inode_init_always(struct super_block
 	inode->i_bdev = NULL;
 	inode->i_cdev = NULL;
 	inode->i_rdev = 0;
+	inode->i_mdev = 0;
 	inode->dirtied_when = 0;
 
 	if (security_inode_alloc(inode))
@@ -307,6 +311,8 @@ void __iget(struct inode *inode)
 	inodes_stat.nr_unused--;
 }
 
+EXPORT_SYMBOL_GPL(__iget);
+
 /**
  * clear_inode - clear an inode
  * @inode: inode to clear
@@ -1611,9 +1617,11 @@ void init_special_inode(struct inode *in
 	if (S_ISCHR(mode)) {
 		inode->i_fop = &def_chr_fops;
 		inode->i_rdev = rdev;
+		inode->i_mdev = rdev;
 	} else if (S_ISBLK(mode)) {
 		inode->i_fop = &def_blk_fops;
 		inode->i_rdev = rdev;
+		inode->i_mdev = rdev;
 	} else if (S_ISFIFO(mode))
 		inode->i_fop = &def_fifo_fops;
 	else if (S_ISSOCK(mode))
diff -NurpP --minimal linux-2.6.32.17/fs/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ioctl.c
--- linux-2.6.32.17/fs/ioctl.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -16,6 +16,9 @@
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>
 #include <linux/falloc.h>
+#include <linux/proc_fs.h>
+#include <linux/vserver/inode.h>
+#include <linux/vs_tag.h>
 
 #include <asm/ioctls.h>
 
diff -NurpP --minimal linux-2.6.32.17/fs/ioprio.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ioprio.c
--- linux-2.6.32.17/fs/ioprio.c	2009-03-24 14:22:26.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ioprio.c	2009-12-03 20:04:56.000000000 +0100
@@ -26,6 +26,7 @@
 #include <linux/syscalls.h>
 #include <linux/security.h>
 #include <linux/pid_namespace.h>
+#include <linux/vs_base.h>
 
 int set_task_ioprio(struct task_struct *task, int ioprio)
 {
@@ -123,6 +124,8 @@ SYSCALL_DEFINE3(ioprio_set, int, which, 
 			else
 				pgrp = find_vpid(who);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				ret = set_task_ioprio(p, ioprio);
 				if (ret)
 					break;
@@ -212,6 +215,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, 
 			else
 				pgrp = find_vpid(who);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				tmpio = get_task_ioprio(p);
 				if (tmpio < 0)
 					continue;
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/acl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/acl.c
--- linux-2.6.32.17/fs/jfs/acl.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/acl.c	2009-12-03 20:04:56.000000000 +0100
@@ -216,7 +216,8 @@ int jfs_setattr(struct dentry *dentry, s
 		return rc;
 
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
-	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
+	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
+	    (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
 		if (vfs_dq_transfer(inode, iattr))
 			return -EDQUOT;
 	}
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/file.c
--- linux-2.6.32.17/fs/jfs/file.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/file.c	2009-12-03 20:04:56.000000000 +0100
@@ -98,6 +98,7 @@ const struct inode_operations jfs_file_i
 	.setattr	= jfs_setattr,
 	.check_acl	= jfs_check_acl,
 #endif
+	.sync_flags	= jfs_sync_flags,
 };
 
 const struct file_operations jfs_file_operations = {
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/ioctl.c
--- linux-2.6.32.17/fs/jfs/ioctl.c	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -11,6 +11,7 @@
 #include <linux/mount.h>
 #include <linux/time.h>
 #include <linux/sched.h>
+#include <linux/mount.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 
@@ -52,6 +53,16 @@ static long jfs_map_ext2(unsigned long f
 }
 
 
+int jfs_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	jfs_get_inode_flags(JFS_IP(inode));
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return 0;
+}
+
 long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
@@ -85,6 +96,11 @@ long jfs_ioctl(struct file *filp, unsign
 		if (!S_ISDIR(inode->i_mode))
 			flags &= ~JFS_DIRSYNC_FL;
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		/* Is it quota file? Do not allow user to mess with it */
 		if (IS_NOQUOTA(inode)) {
 			err = -EPERM;
@@ -102,8 +118,8 @@ long jfs_ioctl(struct file *filp, unsign
 		 * the relevant capability.
 		 */
 		if ((oldflags & JFS_IMMUTABLE_FL) ||
-			((flags ^ oldflags) &
-			(JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
+			((flags ^ oldflags) & (JFS_APPEND_FL |
+			JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE)) {
 				mutex_unlock(&inode->i_mutex);
 				err = -EPERM;
@@ -111,7 +127,7 @@ long jfs_ioctl(struct file *filp, unsign
 			}
 		}
 
-		flags = flags & JFS_FL_USER_MODIFIABLE;
+		flags &= JFS_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~JFS_FL_USER_MODIFIABLE;
 		jfs_inode->mode2 = flags;
 
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/jfs_dinode.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_dinode.h
--- linux-2.6.32.17/fs/jfs/jfs_dinode.h	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_dinode.h	2009-12-03 20:04:56.000000000 +0100
@@ -161,9 +161,13 @@ struct dinode {
 
 #define JFS_APPEND_FL		0x01000000 /* writes to file may only append */
 #define JFS_IMMUTABLE_FL	0x02000000 /* Immutable file */
+#define JFS_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
 
-#define JFS_FL_USER_VISIBLE	0x03F80000
-#define JFS_FL_USER_MODIFIABLE	0x03F80000
+#define JFS_BARRIER_FL		0x04000000 /* Barrier for chroot() */
+#define JFS_COW_FL		0x20000000 /* Copy on Write marker */
+
+#define JFS_FL_USER_VISIBLE	0x07F80000
+#define JFS_FL_USER_MODIFIABLE	0x07F80000
 #define JFS_FL_INHERIT		0x03C80000
 
 /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/jfs_filsys.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_filsys.h
--- linux-2.6.32.17/fs/jfs/jfs_filsys.h	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_filsys.h	2009-12-03 20:04:56.000000000 +0100
@@ -263,6 +263,7 @@
 #define JFS_NAME_MAX	255
 #define JFS_PATH_MAX	BPSIZE
 
+#define JFS_TAGGED		0x00800000	/* Context Tagging */
 
 /*
  *	file system state (superblock state)
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/jfs_imap.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_imap.c
--- linux-2.6.32.17/fs/jfs/jfs_imap.c	2009-09-10 15:26:22.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_imap.c	2009-12-03 20:04:56.000000000 +0100
@@ -45,6 +45,7 @@
 #include <linux/buffer_head.h>
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 
 #include "jfs_incore.h"
 #include "jfs_inode.h"
@@ -3059,6 +3060,8 @@ static int copy_from_dinode(struct dinod
 {
 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
 	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	uid_t uid;
+	gid_t gid;
 
 	jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
 	jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
@@ -3079,14 +3082,18 @@ static int copy_from_dinode(struct dinod
 	}
 	ip->i_nlink = le32_to_cpu(dip->di_nlink);
 
-	jfs_ip->saved_uid = le32_to_cpu(dip->di_uid);
+	uid = le32_to_cpu(dip->di_uid);
+	gid = le32_to_cpu(dip->di_gid);
+	ip->i_tag = INOTAG_TAG(DX_TAG(ip), uid, gid, 0);
+
+	jfs_ip->saved_uid = INOTAG_UID(DX_TAG(ip), uid, gid);
 	if (sbi->uid == -1)
 		ip->i_uid = jfs_ip->saved_uid;
 	else {
 		ip->i_uid = sbi->uid;
 	}
 
-	jfs_ip->saved_gid = le32_to_cpu(dip->di_gid);
+	jfs_ip->saved_gid = INOTAG_GID(DX_TAG(ip), uid, gid);
 	if (sbi->gid == -1)
 		ip->i_gid = jfs_ip->saved_gid;
 	else {
@@ -3151,14 +3158,12 @@ static void copy_to_dinode(struct dinode
 	dip->di_size = cpu_to_le64(ip->i_size);
 	dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
 	dip->di_nlink = cpu_to_le32(ip->i_nlink);
-	if (sbi->uid == -1)
-		dip->di_uid = cpu_to_le32(ip->i_uid);
-	else
-		dip->di_uid = cpu_to_le32(jfs_ip->saved_uid);
-	if (sbi->gid == -1)
-		dip->di_gid = cpu_to_le32(ip->i_gid);
-	else
-		dip->di_gid = cpu_to_le32(jfs_ip->saved_gid);
+
+	dip->di_uid = cpu_to_le32(TAGINO_UID(DX_TAG(ip),
+		(sbi->uid == -1) ? ip->i_uid : jfs_ip->saved_uid, ip->i_tag));
+	dip->di_gid = cpu_to_le32(TAGINO_GID(DX_TAG(ip),
+		(sbi->gid == -1) ? ip->i_gid : jfs_ip->saved_gid, ip->i_tag));
+
 	jfs_get_inode_flags(jfs_ip);
 	/*
 	 * mode2 is only needed for storing the higher order bits.
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/jfs_inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_inode.c
--- linux-2.6.32.17/fs/jfs/jfs_inode.c	2009-06-11 17:13:05.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_inode.c	2009-12-03 20:04:56.000000000 +0100
@@ -18,6 +18,7 @@
 
 #include <linux/fs.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 #include "jfs_incore.h"
 #include "jfs_inode.h"
 #include "jfs_filsys.h"
@@ -30,29 +31,46 @@ void jfs_set_inode_flags(struct inode *i
 {
 	unsigned int flags = JFS_IP(inode)->mode2;
 
-	inode->i_flags &= ~(S_IMMUTABLE | S_APPEND |
-		S_NOATIME | S_DIRSYNC | S_SYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
 
 	if (flags & JFS_IMMUTABLE_FL)
 		inode->i_flags |= S_IMMUTABLE;
+	if (flags & JFS_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
+
+	if (flags & JFS_SYNC_FL)
+		inode->i_flags |= S_SYNC;
 	if (flags & JFS_APPEND_FL)
 		inode->i_flags |= S_APPEND;
 	if (flags & JFS_NOATIME_FL)
 		inode->i_flags |= S_NOATIME;
 	if (flags & JFS_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
-	if (flags & JFS_SYNC_FL)
-		inode->i_flags |= S_SYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & JFS_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & JFS_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip)
 {
 	unsigned int flags = jfs_ip->vfs_inode.i_flags;
+	unsigned int vflags = jfs_ip->vfs_inode.i_vflags;
+
+	jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL |
+			   JFS_APPEND_FL | JFS_NOATIME_FL |
+			   JFS_DIRSYNC_FL | JFS_SYNC_FL |
+			   JFS_BARRIER_FL | JFS_COW_FL);
 
-	jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL | JFS_NOATIME_FL |
-			   JFS_DIRSYNC_FL | JFS_SYNC_FL);
 	if (flags & S_IMMUTABLE)
 		jfs_ip->mode2 |= JFS_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		jfs_ip->mode2 |= JFS_IXUNLINK_FL;
+
 	if (flags & S_APPEND)
 		jfs_ip->mode2 |= JFS_APPEND_FL;
 	if (flags & S_NOATIME)
@@ -61,6 +79,11 @@ void jfs_get_inode_flags(struct jfs_inod
 		jfs_ip->mode2 |= JFS_DIRSYNC_FL;
 	if (flags & S_SYNC)
 		jfs_ip->mode2 |= JFS_SYNC_FL;
+
+	if (vflags & V_BARRIER)
+		jfs_ip->mode2 |= JFS_BARRIER_FL;
+	if (vflags & V_COW)
+		jfs_ip->mode2 |= JFS_COW_FL;
 }
 
 /*
@@ -105,6 +128,7 @@ struct inode *ialloc(struct inode *paren
 			mode |= S_ISGID;
 	} else
 		inode->i_gid = current_fsgid();
+	inode->i_tag = dx_current_fstag(sb);
 
 	/*
 	 * New inodes need to save sane values on disk when
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/jfs_inode.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_inode.h
--- linux-2.6.32.17/fs/jfs/jfs_inode.h	2009-06-11 17:13:05.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/jfs_inode.h	2009-12-03 20:04:56.000000000 +0100
@@ -39,6 +39,7 @@ extern struct dentry *jfs_fh_to_dentry(s
 extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
 	int fh_len, int fh_type);
 extern void jfs_set_inode_flags(struct inode *);
+extern int jfs_sync_flags(struct inode *, int, int);
 extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
 extern const struct address_space_operations jfs_aops;
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/namei.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/namei.c
--- linux-2.6.32.17/fs/jfs/namei.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/namei.c	2009-12-03 20:04:56.000000000 +0100
@@ -21,6 +21,7 @@
 #include <linux/ctype.h>
 #include <linux/quotaops.h>
 #include <linux/exportfs.h>
+#include <linux/vs_tag.h>
 #include "jfs_incore.h"
 #include "jfs_superblock.h"
 #include "jfs_inode.h"
@@ -1476,6 +1477,7 @@ static struct dentry *jfs_lookup(struct 
 		return ERR_CAST(ip);
 	}
 
+	dx_propagate_tag(nd, ip);
 	dentry = d_splice_alias(ip, dentry);
 
 	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
@@ -1545,6 +1547,7 @@ const struct inode_operations jfs_dir_in
 	.setattr	= jfs_setattr,
 	.check_acl	= jfs_check_acl,
 #endif
+	.sync_flags	= jfs_sync_flags,
 };
 
 const struct file_operations jfs_dir_operations = {
diff -NurpP --minimal linux-2.6.32.17/fs/jfs/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/super.c
--- linux-2.6.32.17/fs/jfs/super.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/jfs/super.c	2009-12-03 20:04:56.000000000 +0100
@@ -192,7 +192,8 @@ static void jfs_put_super(struct super_b
 enum {
 	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
 	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
-	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
+	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
+	Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -202,6 +203,10 @@ static const match_table_t tokens = {
 	{Opt_resize, "resize=%u"},
 	{Opt_resize_nosize, "resize"},
 	{Opt_errors, "errors=%s"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
+	{Opt_tag, "tagxid"},
 	{Opt_ignore, "noquota"},
 	{Opt_ignore, "quota"},
 	{Opt_usrquota, "usrquota"},
@@ -336,6 +341,20 @@ static int parse_options(char *options, 
 			}
 			break;
 		}
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			*flag |= JFS_TAGGED;
+			break;
+		case Opt_notag:
+			*flag &= ~JFS_TAGGED;
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			*flag |= JFS_TAGGED;
+			break;
+#endif
 		default:
 			printk("jfs: Unrecognized mount option \"%s\" "
 					" or missing value\n", p);
@@ -366,6 +385,12 @@ static int jfs_remount(struct super_bloc
 	if (!parse_options(data, sb, &newLVSize, &flag)) {
 		return -EINVAL;
 	}
+	if ((flag & JFS_TAGGED) && !(sb->s_flags & MS_TAGGED)) {
+		printk(KERN_ERR "JFS: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		return -EINVAL;
+	}
+
 	lock_kernel();
 	if (newLVSize) {
 		if (sb->s_flags & MS_RDONLY) {
@@ -449,6 +474,9 @@ static int jfs_fill_super(struct super_b
 #ifdef CONFIG_JFS_POSIX_ACL
 	sb->s_flags |= MS_POSIXACL;
 #endif
+	/* map mount option tagxid */
+	if (sbi->flag & JFS_TAGGED)
+		sb->s_flags |= MS_TAGGED;
 
 	if (newLVSize) {
 		printk(KERN_ERR "resize option for remount only\n");
diff -NurpP --minimal linux-2.6.32.17/fs/libfs.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/libfs.c
--- linux-2.6.32.17/fs/libfs.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/libfs.c	2010-07-08 02:22:31.000000000 +0200
@@ -127,7 +127,8 @@ static inline unsigned char dt_type(stru
  * both impossible due to the lock on directory.
  */
 
-int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static inline int do_dcache_readdir_filter(struct file *filp,
+	void *dirent, filldir_t filldir, int (*filter)(struct dentry *dentry))
 {
 	struct dentry *dentry = filp->f_path.dentry;
 	struct dentry *cursor = filp->private_data;
@@ -160,6 +161,8 @@ int dcache_readdir(struct file * filp, v
 				next = list_entry(p, struct dentry, d_u.d_child);
 				if (d_unhashed(next) || !next->d_inode)
 					continue;
+				if (filter && !filter(next))
+					continue;
 
 				spin_unlock(&dcache_lock);
 				if (filldir(dirent, next->d_name.name, 
@@ -178,6 +181,18 @@ int dcache_readdir(struct file * filp, v
 	return 0;
 }
 
+int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	return do_dcache_readdir_filter(filp, dirent, filldir, NULL);
+}
+
+int dcache_readdir_filter(struct file *filp, void *dirent, filldir_t filldir,
+	int (*filter)(struct dentry *))
+{
+	return do_dcache_readdir_filter(filp, dirent, filldir, filter);
+}
+
+
 ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
 {
 	return -EISDIR;
@@ -842,6 +857,7 @@ EXPORT_SYMBOL(dcache_dir_close);
 EXPORT_SYMBOL(dcache_dir_lseek);
 EXPORT_SYMBOL(dcache_dir_open);
 EXPORT_SYMBOL(dcache_readdir);
+EXPORT_SYMBOL(dcache_readdir_filter);
 EXPORT_SYMBOL(generic_read_dir);
 EXPORT_SYMBOL(get_sb_pseudo);
 EXPORT_SYMBOL(simple_write_begin);
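
dcache_readdir() is split above into a common do_dcache_readdir_filter() helper plus two entry points, so a caller can pass a predicate that is consulted for every hashed, positive dentry and hide the entries it does not want listed. A hypothetical user (the filter below is an illustration only; the exported signature is the one added by this patch):

	/* hypothetical user of the exported dcache_readdir_filter() */
	static int visible_entry(struct dentry *dentry)
	{
		/* illustration: list everything except dot files */
		return dentry->d_name.name[0] != '.';
	}

	static int filtered_readdir(struct file *filp, void *dirent, filldir_t filldir)
	{
		return dcache_readdir_filter(filp, dirent, filldir, visible_entry);
	}
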
diff -NurpP --minimal linux-2.6.32.17/fs/locks.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/locks.c
--- linux-2.6.32.17/fs/locks.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/locks.c	2009-12-03 20:04:56.000000000 +0100
@@ -127,6 +127,8 @@
 #include <linux/time.h>
 #include <linux/rcupdate.h>
 #include <linux/pid_namespace.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
 
 #include <asm/uaccess.h>
 
@@ -148,6 +150,8 @@ static struct kmem_cache *filelock_cache
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
 {
+	if (!vx_locks_avail(1))
+		return NULL;
 	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
 
@@ -174,6 +178,7 @@ static void locks_free_lock(struct file_
 	BUG_ON(!list_empty(&fl->fl_block));
 	BUG_ON(!list_empty(&fl->fl_link));
 
+	vx_locks_dec(fl);
 	locks_release_private(fl);
 	kmem_cache_free(filelock_cache, fl);
 }
@@ -194,6 +199,7 @@ void locks_init_lock(struct file_lock *f
 	fl->fl_start = fl->fl_end = 0;
 	fl->fl_ops = NULL;
 	fl->fl_lmops = NULL;
+	fl->fl_xid = -1;
 }
 
 EXPORT_SYMBOL(locks_init_lock);
@@ -248,6 +254,7 @@ void locks_copy_lock(struct file_lock *n
 	new->fl_file = fl->fl_file;
 	new->fl_ops = fl->fl_ops;
 	new->fl_lmops = fl->fl_lmops;
+	new->fl_xid = fl->fl_xid;
 
 	locks_copy_private(new, fl);
 }
@@ -286,6 +293,11 @@ static int flock_make_lock(struct file *
 	fl->fl_flags = FL_FLOCK;
 	fl->fl_type = type;
 	fl->fl_end = OFFSET_MAX;
+
+	vxd_assert(filp->f_xid == vx_current_xid(),
+		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+	fl->fl_xid = filp->f_xid;
+	vx_locks_inc(fl);
 	
 	*lock = fl;
 	return 0;
@@ -451,6 +463,7 @@ static int lease_init(struct file *filp,
 
 	fl->fl_owner = current->files;
 	fl->fl_pid = current->tgid;
+	fl->fl_xid = vx_current_xid();
 
 	fl->fl_file = filp;
 	fl->fl_flags = FL_LEASE;
@@ -470,6 +483,11 @@ static struct file_lock *lease_alloc(str
 	if (fl == NULL)
 		return ERR_PTR(error);
 
+	fl->fl_xid = vx_current_xid();
+	if (filp)
+		vxd_assert(filp->f_xid == fl->fl_xid,
+			"f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid);
+	vx_locks_inc(fl);
 	error = lease_init(filp, type, fl);
 	if (error) {
 		locks_free_lock(fl);
@@ -770,6 +788,7 @@ static int flock_lock_file(struct file *
 	if (found)
 		cond_resched();
 
+	new_fl->fl_xid = -1;
 find_conflict:
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
@@ -790,6 +809,7 @@ find_conflict:
 		goto out;
 	locks_copy_lock(new_fl, request);
 	locks_insert_lock(before, new_fl);
+	vx_locks_inc(new_fl);
 	new_fl = NULL;
 	error = 0;
 
@@ -800,7 +820,8 @@ out:
 	return error;
 }
 
-static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
+static int __posix_lock_file(struct inode *inode, struct file_lock *request,
+	struct file_lock *conflock, xid_t xid)
 {
 	struct file_lock *fl;
 	struct file_lock *new_fl = NULL;
@@ -810,6 +831,8 @@ static int __posix_lock_file(struct inod
 	struct file_lock **before;
 	int error, added = 0;
 
+	vxd_assert(xid == vx_current_xid(),
+		"xid(%d) == current(%d)", xid, vx_current_xid());
 	/*
 	 * We may need two file_lock structures for this operation,
 	 * so we get them in advance to avoid races.
@@ -820,7 +843,11 @@ static int __posix_lock_file(struct inod
 	    (request->fl_type != F_UNLCK ||
 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
 		new_fl = locks_alloc_lock();
+		new_fl->fl_xid = xid;
+		vx_locks_inc(new_fl);
 		new_fl2 = locks_alloc_lock();
+		new_fl2->fl_xid = xid;
+		vx_locks_inc(new_fl2);
 	}
 
 	lock_kernel();
@@ -1019,7 +1046,8 @@ static int __posix_lock_file(struct inod
 int posix_lock_file(struct file *filp, struct file_lock *fl,
 			struct file_lock *conflock)
 {
-	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
+	return __posix_lock_file(filp->f_path.dentry->d_inode,
+		fl, conflock, filp->f_xid);
 }
 EXPORT_SYMBOL(posix_lock_file);
 
@@ -1109,7 +1137,7 @@ int locks_mandatory_area(int read_write,
 	fl.fl_end = offset + count - 1;
 
 	for (;;) {
-		error = __posix_lock_file(inode, &fl, NULL);
+		error = __posix_lock_file(inode, &fl, NULL, filp->f_xid);
 		if (error != FILE_LOCK_DEFERRED)
 			break;
 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
@@ -1424,6 +1452,7 @@ int generic_setlease(struct file *filp, 
 
 	locks_copy_lock(new_fl, lease);
 	locks_insert_lock(before, new_fl);
+	vx_locks_inc(new_fl);
 
 	*flp = new_fl;
 	return 0;
@@ -1779,6 +1808,11 @@ int fcntl_setlk(unsigned int fd, struct 
 	if (file_lock == NULL)
 		return -ENOLCK;
 
+	vxd_assert(filp->f_xid == vx_current_xid(),
+		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+	file_lock->fl_xid = filp->f_xid;
+	vx_locks_inc(file_lock);
+
 	/*
 	 * This might block, so we do it before checking the inode.
 	 */
@@ -1897,6 +1931,11 @@ int fcntl_setlk64(unsigned int fd, struc
 	if (file_lock == NULL)
 		return -ENOLCK;
 
+	vxd_assert(filp->f_xid == vx_current_xid(),
+		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+	file_lock->fl_xid = filp->f_xid;
+	vx_locks_inc(file_lock);
+
 	/*
 	 * This might block, so we do it before checking the inode.
 	 */
@@ -2162,8 +2201,11 @@ static int locks_show(struct seq_file *f
 
 	lock_get_status(f, fl, (long)f->private, "");
 
-	list_for_each_entry(bfl, &fl->fl_block, fl_block)
+	list_for_each_entry(bfl, &fl->fl_block, fl_block) {
+		if (!vx_check(bfl->fl_xid, VS_WATCH_P | VS_IDENT))
+			continue;
 		lock_get_status(f, bfl, (long)f->private, " ->");
+	}
 
 	f->private++;
 	return 0;
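
The fs/locks.c hunks above all follow one pattern: a file_lock is tagged with the xid of the owning context and charged against that context's lock limit via vx_locks_inc(), locks_free_lock() returns the charge through vx_locks_dec(), and locks_alloc_lock() now fails early once vx_locks_avail() reports the limit as exhausted. A minimal sketch of that pattern, condensed into one helper (alloc_tagged_lock() is hypothetical and only for illustration; the other symbols are the ones the hunks use):

#include <linux/fs.h>
#include <linux/vs_base.h>
#include <linux/vs_limit.h>

/* sketch only: allocate a lock, tag it, and charge the owning context */
static struct file_lock *alloc_tagged_lock(struct file *filp)
{
	struct file_lock *fl = locks_alloc_lock();

	/* may be NULL once the context is over its lock limit */
	if (!fl)
		return NULL;

	fl->fl_xid = filp ? filp->f_xid : vx_current_xid();
	vx_locks_inc(fl);
	return fl;
}

With the limit check in place a NULL return from locks_alloc_lock() becomes an ordinary event, so tagging the result only after a NULL check, as above, is the safer shape; the __posix_lock_file() hunk tags new_fl and new_fl2 immediately and relies on the allocation succeeding.
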
diff -NurpP --minimal linux-2.6.32.17/fs/namei.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/namei.c
--- linux-2.6.32.17/fs/namei.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/namei.c	2010-03-18 16:53:06.000000000 +0100
@@ -33,6 +33,14 @@
 #include <linux/fcntl.h>
 #include <linux/device_cgroup.h>
 #include <linux/fs_struct.h>
+#include <linux/proc_fs.h>
+#include <linux/vserver/inode.h>
+#include <linux/vs_base.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_cowbl.h>
+#include <linux/vs_device.h>
+#include <linux/vs_context.h>
+#include <linux/pid_namespace.h>
 #include <asm/uaccess.h>
 
 #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
@@ -169,6 +177,77 @@ void putname(const char *name)
 EXPORT_SYMBOL(putname);
 #endif
 
+static inline int dx_barrier(const struct inode *inode)
+{
+	if (IS_BARRIER(inode) && !vx_check(0, VS_ADMIN | VS_WATCH)) {
+		vxwprintk_task(1, "did hit the barrier.");
+		return 1;
+	}
+	return 0;
+}
+
+static int __dx_permission(const struct inode *inode, int mask)
+{
+	if (dx_barrier(inode))
+		return -EACCES;
+
+	if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) {
+		/* devpts is xid tagged */
+		if (S_ISDIR(inode->i_mode) ||
+		    vx_check((xid_t)inode->i_tag, VS_IDENT | VS_WATCH_P))
+			return 0;
+	}
+	else if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) {
+		struct proc_dir_entry *de = PDE(inode);
+
+		if (de && !vx_hide_check(0, de->vx_flags))
+			goto out;
+
+		if ((mask & (MAY_WRITE | MAY_APPEND))) {
+			struct pid *pid;
+			struct task_struct *tsk;
+
+			if (vx_check(0, VS_ADMIN | VS_WATCH_P) ||
+			    vx_flags(VXF_STATE_SETUP, 0))
+				return 0;
+
+			pid = PROC_I(inode)->pid;
+			if (!pid)
+				goto out;
+
+			tsk = pid_task(pid, PIDTYPE_PID);
+			vxdprintk(VXD_CBIT(tag, 0), "accessing %p[#%u]",
+				  tsk, (tsk ? vx_task_xid(tsk) : 0));
+			if (tsk && vx_check(vx_task_xid(tsk), VS_IDENT | VS_WATCH_P))
+				return 0;
+		}
+		else {
+			/* FIXME: Should we block some entries here? */
+			return 0;
+		}
+	}
+	else {
+		if (dx_notagcheck(inode->i_sb) ||
+		    dx_check(inode->i_tag, DX_HOSTID | DX_ADMIN | DX_WATCH |
+			     DX_IDENT))
+			return 0;
+	}
+
+out:
+	return -EACCES;
+}
+
+int dx_permission(const struct inode *inode, int mask)
+{
+	int ret = __dx_permission(inode, mask);
+	if (unlikely(ret)) {
+		vxwprintk_task(1, "denied %x access to %s:%p[#%d,%lu]",
+			mask, inode->i_sb->s_id, inode, inode->i_tag,
+			inode->i_ino);
+	}
+	return ret;
+}
+
 /*
  * This does basic POSIX ACL permission checking
  */
@@ -269,10 +348,14 @@ int inode_permission(struct inode *inode
 		/*
 		 * Nobody gets write access to an immutable file.
 		 */
-		if (IS_IMMUTABLE(inode))
+		if (IS_IMMUTABLE(inode) && !IS_COW(inode))
 			return -EACCES;
 	}
 
+	retval = dx_permission(inode, mask);
+	if (retval)
+		return retval;
+
 	if (inode->i_op->permission)
 		retval = inode->i_op->permission(inode, mask);
 	else
@@ -448,6 +531,9 @@ static int exec_permission_lite(struct i
 {
 	int ret;
 
+	if (dx_barrier(inode))
+		return -EACCES;
+
 	if (inode->i_op->permission) {
 		ret = inode->i_op->permission(inode, MAY_EXEC);
 		if (!ret)
@@ -763,7 +849,8 @@ static __always_inline void follow_dotdo
 
 		if (nd->path.dentry == nd->root.dentry &&
 		    nd->path.mnt == nd->root.mnt) {
-			break;
+			/* for sane '/' avoid follow_mount() */
+			return;
 		}
 		spin_lock(&dcache_lock);
 		if (nd->path.dentry != nd->path.mnt->mnt_root) {
@@ -799,16 +886,30 @@ static int do_lookup(struct nameidata *n
 {
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry = __d_lookup(nd->path.dentry, name);
+	struct inode *inode;
 
 	if (!dentry)
 		goto need_lookup;
 	if (dentry->d_op && dentry->d_op->d_revalidate)
 		goto need_revalidate;
+	inode = dentry->d_inode;
+	if (!inode)
+		goto done;
+
+	if (__dx_permission(inode, MAY_ACCESS))
+		goto hidden;
+
 done:
 	path->mnt = mnt;
 	path->dentry = dentry;
 	__follow_mount(path);
 	return 0;
+hidden:
+	vxwprintk_task(1, "did lookup hidden %s:%p[#%d,%lu] »%s/%.*s«.",
+		inode->i_sb->s_id, inode, inode->i_tag, inode->i_ino,
+		vxd_path(&nd->path), name->len, name->name);
+	dput(dentry);
+	return -ENOENT;
 
 need_lookup:
 	dentry = real_lookup(nd->path.dentry, name, nd);
@@ -1400,7 +1501,7 @@ static int may_delete(struct inode *dir,
 	if (IS_APPEND(dir))
 		return -EPERM;
 	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
-	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
+		IS_IXORUNLINK(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
 		return -EPERM;
 	if (isdir) {
 		if (!S_ISDIR(victim->d_inode->i_mode))
@@ -1540,6 +1641,14 @@ int may_open(struct path *path, int acc_
 		break;
 	}
 
+#ifdef	CONFIG_VSERVER_COWBL
+	if (IS_COW(inode) && (flag & FMODE_WRITE)) {
+		if (IS_COW_LINK(inode))
+			return -EMLINK;
+		inode->i_flags &= ~(S_IXUNLINK|S_IMMUTABLE);
+		mark_inode_dirty(inode);
+	}
+#endif
 	error = inode_permission(inode, acc_mode);
 	if (error)
 		return error;
@@ -1688,7 +1797,11 @@ struct file *do_filp_open(int dfd, const
 	int count = 0;
 	int will_write;
 	int flag = open_to_namei_flags(open_flag);
-
+#ifdef	CONFIG_VSERVER_COWBL
+	int rflag = flag;
+	int rmode = mode;
+restart:
+#endif
 	if (!acc_mode)
 		acc_mode = MAY_OPEN | ACC_MODE(flag);
 
@@ -1836,6 +1949,25 @@ ok:
 			goto exit;
 	}
 	error = may_open(&nd.path, acc_mode, flag);
+#ifdef	CONFIG_VSERVER_COWBL
+	if (error == -EMLINK) {
+		struct dentry *dentry;
+		dentry = cow_break_link(pathname);
+		if (IS_ERR(dentry)) {
+			error = PTR_ERR(dentry);
+			goto exit_cow;
+		}
+		dput(dentry);
+		if (will_write)
+			mnt_drop_write(nd.path.mnt);
+		release_open_intent(&nd);
+		path_put(&nd.path);
+		flag = rflag;
+		mode = rmode;
+		goto restart;
+	}
+exit_cow:
+#endif
 	if (error) {
 		if (will_write)
 			mnt_drop_write(nd.path.mnt);
@@ -1998,9 +2130,17 @@ int vfs_mknod(struct inode *dir, struct 
 	if (error)
 		return error;
 
-	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
+	if (!(S_ISCHR(mode) || S_ISBLK(mode)))
+		goto okay;
+
+	if (!capable(CAP_MKNOD))
 		return -EPERM;
 
+	if (S_ISCHR(mode) && !vs_chrdev_perm(dev, DATTR_CREATE))
+		return -EPERM;
+	if (S_ISBLK(mode) && !vs_blkdev_perm(dev, DATTR_CREATE))
+		return -EPERM;
+okay:
 	if (!dir->i_op->mknod)
 		return -EPERM;
 
@@ -2467,7 +2607,7 @@ int vfs_link(struct dentry *old_dentry, 
 	/*
 	 * A link to an append-only or immutable file cannot be created.
 	 */
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
 		return -EPERM;
 	if (!dir->i_op->link)
 		return -EPERM;
@@ -2840,6 +2980,219 @@ int vfs_follow_link(struct nameidata *nd
 	return __vfs_follow_link(nd, link);
 }
 
+
+#ifdef	CONFIG_VSERVER_COWBL
+
+#include <linux/file.h>
+
+static inline
+long do_cow_splice(struct file *in, struct file *out, size_t len)
+{
+	loff_t ppos = 0;
+
+	return do_splice_direct(in, &ppos, out, len, 0);
+}
+
+struct dentry *cow_break_link(const char *pathname)
+{
+	int ret, mode, pathlen, redo = 0;
+	struct nameidata old_nd, dir_nd;
+	struct path old_path, new_path;
+	struct dentry *dir, *res = NULL;
+	struct file *old_file;
+	struct file *new_file;
+	char *to, *path, pad='\251';
+	loff_t size;
+
+	vxdprintk(VXD_CBIT(misc, 1), "cow_break_link(»%s«)", pathname);
+	path = kmalloc(PATH_MAX, GFP_KERNEL);
+	ret = -ENOMEM;
+	if (!path)
+		goto out;
+
+	/* old_nd will have refs to dentry and mnt */
+	ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd);
+	vxdprintk(VXD_CBIT(misc, 2), "path_lookup(old): %d", ret);
+	if (ret < 0)
+		goto out_free_path;
+
+	old_path = old_nd.path;
+	mode = old_path.dentry->d_inode->i_mode;
+
+	to = d_path(&old_path, path, PATH_MAX-2);
+	pathlen = strlen(to);
+	vxdprintk(VXD_CBIT(misc, 2), "old path »%s« [»%.*s«:%d]", to,
+		old_path.dentry->d_name.len, old_path.dentry->d_name.name,
+		old_path.dentry->d_name.len);
+
+	to[pathlen + 1] = 0;
+retry:
+	to[pathlen] = pad--;
+	ret = -EMLINK;
+	if (pad <= '\240')
+		goto out_rel_old;
+
+	vxdprintk(VXD_CBIT(misc, 1), "temp copy »%s«", to);
+	/* dir_nd will have refs to dentry and mnt */
+	ret = path_lookup(to,
+		LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE, &dir_nd);
+	vxdprintk(VXD_CBIT(misc, 2),
+		"path_lookup(new): %d", ret);
+	if (ret < 0)
+		goto retry;
+
+	/* this puppy downs the inode mutex */
+	new_path.dentry = lookup_create(&dir_nd, 0);
+	if (!new_path.dentry || IS_ERR(new_path.dentry)) {
+		vxdprintk(VXD_CBIT(misc, 2),
+			"lookup_create(new): %p", new_path.dentry);
+		mutex_unlock(&dir_nd.path.dentry->d_inode->i_mutex);
+		path_put(&dir_nd.path);
+		goto retry;
+	}
+	vxdprintk(VXD_CBIT(misc, 2),
+		"lookup_create(new): %p [»%.*s«:%d]", new_path.dentry,
+		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+		new_path.dentry->d_name.len);
+	dir = dir_nd.path.dentry;
+
+	ret = vfs_create(dir_nd.path.dentry->d_inode, new_path.dentry, mode, &dir_nd);
+	vxdprintk(VXD_CBIT(misc, 2),
+		"vfs_create(new): %d", ret);
+	if (ret == -EEXIST) {
+		mutex_unlock(&dir->d_inode->i_mutex);
+		dput(new_path.dentry);
+		path_put(&dir_nd.path);
+		goto retry;
+	}
+	else if (ret < 0)
+		goto out_unlock_new;
+
+	/* drop out early, ret passes ENOENT */
+	ret = -ENOENT;
+	if ((redo = d_unhashed(old_path.dentry)))
+		goto out_unlock_new;
+
+	new_path.mnt = dir_nd.path.mnt;
+	dget(old_path.dentry);
+	mntget(old_path.mnt);
+	/* this one cleans up the dentry/mnt in case of failure */
+	old_file = dentry_open(old_path.dentry, old_path.mnt,
+		O_RDONLY, current_cred());
+	vxdprintk(VXD_CBIT(misc, 2),
+		"dentry_open(old): %p", old_file);
+	if (!old_file || IS_ERR(old_file)) {
+		res = IS_ERR(old_file) ? (void *) old_file : res;
+		goto out_unlock_new;
+	}
+
+	dget(new_path.dentry);
+	mntget(new_path.mnt);
+	/* this one cleans up the dentry/mnt in case of failure */
+	new_file = dentry_open(new_path.dentry, new_path.mnt,
+		O_WRONLY, current_cred());
+	vxdprintk(VXD_CBIT(misc, 2),
+		"dentry_open(new): %p", new_file);
+
+	ret = IS_ERR(new_file) ? PTR_ERR(new_file) : -ENOENT;
+	if (!new_file || IS_ERR(new_file))
+		goto out_fput_old;
+
+	size = i_size_read(old_file->f_dentry->d_inode);
+	ret = do_cow_splice(old_file, new_file, size);
+	vxdprintk(VXD_CBIT(misc, 2), "do_splice_direct: %d", ret);
+	if (ret < 0) {
+		goto out_fput_both;
+	} else if (ret < size) {
+		ret = -ENOSPC;
+		goto out_fput_both;
+	} else {
+		struct inode *old_inode = old_path.dentry->d_inode;
+		struct inode *new_inode = new_path.dentry->d_inode;
+		struct iattr attr = {
+			.ia_uid = old_inode->i_uid,
+			.ia_gid = old_inode->i_gid,
+			.ia_valid = ATTR_UID | ATTR_GID
+			};
+
+		ret = inode_setattr(new_inode, &attr);
+		if (ret)
+			goto out_fput_both;
+	}
+
+	mutex_lock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+	/* drop out late */
+	ret = -ENOENT;
+	if ((redo = d_unhashed(old_path.dentry)))
+		goto out_unlock;
+
+	vxdprintk(VXD_CBIT(misc, 2),
+		"vfs_rename: [»%*s«:%d] -> [»%*s«:%d]",
+		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+		new_path.dentry->d_name.len,
+		old_path.dentry->d_name.len, old_path.dentry->d_name.name,
+		old_path.dentry->d_name.len);
+	ret = vfs_rename(dir_nd.path.dentry->d_inode, new_path.dentry,
+		old_nd.path.dentry->d_parent->d_inode, old_path.dentry);
+	vxdprintk(VXD_CBIT(misc, 2), "vfs_rename: %d", ret);
+	res = new_path.dentry;
+
+out_unlock:
+	mutex_unlock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+out_fput_both:
+	vxdprintk(VXD_CBIT(misc, 3),
+		"fput(new_file=%p[#%ld])", new_file,
+		atomic_long_read(&new_file->f_count));
+	fput(new_file);
+
+out_fput_old:
+	vxdprintk(VXD_CBIT(misc, 3),
+		"fput(old_file=%p[#%ld])", old_file,
+		atomic_long_read(&old_file->f_count));
+	fput(old_file);
+
+out_unlock_new:
+	mutex_unlock(&dir->d_inode->i_mutex);
+	if (!ret)
+		goto out_redo;
+
+	/* error path cleanup */
+	vfs_unlink(dir->d_inode, new_path.dentry);
+	dput(new_path.dentry);
+
+out_redo:
+	if (!redo)
+		goto out_rel_both;
+	/* lookup dentry once again */
+	path_put(&old_nd.path);
+	ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd);
+	if (ret)
+		goto out_rel_both;
+
+	new_path.dentry = old_nd.path.dentry;
+	vxdprintk(VXD_CBIT(misc, 2),
+		"path_lookup(redo): %p [»%.*s«:%d]", new_path.dentry,
+		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+		new_path.dentry->d_name.len);
+	dget(new_path.dentry);
+	res = new_path.dentry;
+
+out_rel_both:
+	path_put(&dir_nd.path);
+out_rel_old:
+	path_put(&old_nd.path);
+out_free_path:
+	kfree(path);
+out:
+	if (ret)
+		res = ERR_PTR(ret);
+	return res;
+}
+
+#endif
+
 /* get the link contents into pagecache */
 static char *page_getlink(struct dentry * dentry, struct page **ppage)
 {
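
Two mechanisms land in fs/namei.c above. First, every permission check is routed through dx_permission(), which enforces chroot barriers and per-inode context tags before the filesystem's own ->permission() runs, and do_lookup() additionally hides dentries whose inode fails __dx_permission(), so a guest never sees them at all. Second, with CONFIG_VSERVER_COWBL a write open of a copy-on-write hardlink is refused with -EMLINK by may_open(), and do_filp_open() reacts by breaking the link with cow_break_link() and restarting the lookup. A sketch of that caller-side protocol (open_cow_break() is a hypothetical wrapper; cow_break_link() is the function added above):

#ifdef CONFIG_VSERVER_COWBL
#include <linux/err.h>
#include <linux/dcache.h>

/* sketch only: break the CoW link, then tell the caller to redo the lookup */
static int open_cow_break(const char *pathname)
{
	struct dentry *de = cow_break_link(pathname);

	if (IS_ERR(de))
		return PTR_ERR(de);	/* give up, e.g. no free temp name left */
	dput(de);
	return 0;			/* caller restores flag/mode and jumps to restart: */
}
#endif
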
diff -NurpP --minimal linux-2.6.32.17/fs/namespace.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/namespace.c
--- linux-2.6.32.17/fs/namespace.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/namespace.c	2010-07-08 02:22:31.000000000 +0200
@@ -29,6 +29,11 @@
 #include <linux/log2.h>
 #include <linux/idr.h>
 #include <linux/fs_struct.h>
+#include <linux/vs_base.h>
+#include <linux/vs_context.h>
+#include <linux/vs_tag.h>
+#include <linux/vserver/space.h>
+#include <linux/vserver/global.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include "pnode.h"
@@ -567,6 +572,7 @@ static struct vfsmount *clone_mnt(struct
 		mnt->mnt_root = dget(root);
 		mnt->mnt_mountpoint = mnt->mnt_root;
 		mnt->mnt_parent = mnt;
+		mnt->mnt_tag = old->mnt_tag;
 
 		if (flag & CL_SLAVE) {
 			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -661,6 +667,31 @@ static inline void mangle(struct seq_fil
 	seq_escape(m, s, " \t\n\\");
 }
 
+static int mnt_is_reachable(struct vfsmount *mnt)
+{
+	struct path root;
+	struct dentry *point;
+	int ret;
+
+	if (mnt == mnt->mnt_ns->root)
+		return 1;
+
+	spin_lock(&vfsmount_lock);
+	root = current->fs->root;
+	point = root.dentry;
+
+	while ((mnt != mnt->mnt_parent) && (mnt != root.mnt)) {
+		point = mnt->mnt_mountpoint;
+		mnt = mnt->mnt_parent;
+	}
+
+	ret = (mnt == root.mnt) && is_subdir(point, root.dentry);
+
+	spin_unlock(&vfsmount_lock);
+
+	return ret;
+}
+
 /*
  * Simple .show_options callback for filesystems which don't want to
  * implement more complex mount option showing.
@@ -748,6 +779,8 @@ static int show_sb_opts(struct seq_file 
 		{ MS_SYNCHRONOUS, ",sync" },
 		{ MS_DIRSYNC, ",dirsync" },
 		{ MS_MANDLOCK, ",mand" },
+		{ MS_TAGGED, ",tag" },
+		{ MS_NOTAGCHECK, ",notagcheck" },
 		{ 0, NULL }
 	};
 	const struct proc_fs_info *fs_infop;
@@ -795,10 +828,20 @@ static int show_vfsmnt(struct seq_file *
 	int err = 0;
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
 
-	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
-	seq_putc(m, ' ');
-	seq_path(m, &mnt_path, " \t\n\\");
-	seq_putc(m, ' ');
+	if (vx_flags(VXF_HIDE_MOUNT, 0))
+		return SEQ_SKIP;
+	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+		return SEQ_SKIP;
+
+	if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
+		mnt == current->fs->root.mnt) {
+		seq_puts(m, "/dev/root / ");
+	} else {
+		mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+		seq_putc(m, ' ');
+		seq_path(m, &mnt_path, " \t\n\\");
+		seq_putc(m, ' ');
+	}
 	show_type(m, mnt->mnt_sb);
 	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
 	err = show_sb_opts(m, mnt->mnt_sb);
@@ -828,6 +871,11 @@ static int show_mountinfo(struct seq_fil
 	struct path root = p->root;
 	int err = 0;
 
+	if (vx_flags(VXF_HIDE_MOUNT, 0))
+		return SEQ_SKIP;
+	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+		return SEQ_SKIP;
+
 	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
 		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
 	seq_dentry(m, mnt->mnt_root, " \t\n\\");
@@ -886,17 +934,27 @@ static int show_vfsstat(struct seq_file 
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
 	int err = 0;
 
-	/* device */
-	if (mnt->mnt_devname) {
-		seq_puts(m, "device ");
-		mangle(m, mnt->mnt_devname);
-	} else
-		seq_puts(m, "no device");
+	if (vx_flags(VXF_HIDE_MOUNT, 0))
+		return SEQ_SKIP;
+	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+		return SEQ_SKIP;
 
-	/* mount point */
-	seq_puts(m, " mounted on ");
-	seq_path(m, &mnt_path, " \t\n\\");
-	seq_putc(m, ' ');
+	if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
+		mnt == current->fs->root.mnt) {
+		seq_puts(m, "device /dev/root mounted on / ");
+	} else {
+		/* device */
+		if (mnt->mnt_devname) {
+			seq_puts(m, "device ");
+			mangle(m, mnt->mnt_devname);
+		} else
+			seq_puts(m, "no device");
+
+		/* mount point */
+		seq_puts(m, " mounted on ");
+		seq_path(m, &mnt_path, " \t\n\\");
+		seq_putc(m, ' ');
+	}
 
 	/* file system type */
 	seq_puts(m, "with fstype ");
@@ -1137,7 +1195,7 @@ SYSCALL_DEFINE2(umount, char __user *, n
 		goto dput_and_out;
 
 	retval = -EPERM;
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
 		goto dput_and_out;
 
 	retval = do_umount(path.mnt, flags);
@@ -1163,7 +1221,7 @@ SYSCALL_DEFINE1(oldumount, char __user *
 
 static int mount_is_safe(struct path *path)
 {
-	if (capable(CAP_SYS_ADMIN))
+	if (vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
 		return 0;
 	return -EPERM;
 #ifdef notyet
@@ -1427,7 +1485,7 @@ static int do_change_type(struct path *p
 	int type = flag & ~MS_REC;
 	int err = 0;
 
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_NAMESPACE))
 		return -EPERM;
 
 	if (path->dentry != path->mnt->mnt_root)
@@ -1454,11 +1512,13 @@ static int do_change_type(struct path *p
  * do loopback mount.
  */
 static int do_loopback(struct path *path, char *old_name,
-				int recurse)
+	tag_t tag, unsigned long flags, int mnt_flags)
 {
 	struct path old_path;
 	struct vfsmount *mnt = NULL;
 	int err = mount_is_safe(path);
+	int recurse = flags & MS_REC;
+
 	if (err)
 		return err;
 	if (!old_name || !*old_name)
@@ -1492,6 +1552,7 @@ static int do_loopback(struct path *path
 		spin_unlock(&vfsmount_lock);
 		release_mounts(&umount_list);
 	}
+	mnt->mnt_flags = mnt_flags;
 
 out:
 	up_write(&namespace_sem);
@@ -1522,12 +1583,12 @@ static int change_mount_flags(struct vfs
  * on it - tough luck.
  */
 static int do_remount(struct path *path, int flags, int mnt_flags,
-		      void *data)
+	void *data, xid_t xid)
 {
 	int err;
 	struct super_block *sb = path->mnt->mnt_sb;
 
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_REMOUNT))
 		return -EPERM;
 
 	if (!check_mnt(path->mnt))
@@ -1569,7 +1630,7 @@ static int do_move_mount(struct path *pa
 	struct path old_path, parent_path;
 	struct vfsmount *p;
 	int err = 0;
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
 		return -EPERM;
 	if (!old_name || !*old_name)
 		return -EINVAL;
@@ -1651,7 +1712,7 @@ static int do_new_mount(struct path *pat
 		return -EINVAL;
 
 	/* we need capabilities... */
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
 		return -EPERM;
 
 	lock_kernel();
@@ -1915,6 +1976,7 @@ long do_mount(char *dev_name, char *dir_
 	struct path path;
 	int retval = 0;
 	int mnt_flags = 0;
+	tag_t tag = 0;
 
 	/* Discard magic */
 	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
@@ -1932,6 +1994,12 @@ long do_mount(char *dev_name, char *dir_
 	if (!(flags & MS_NOATIME))
 		mnt_flags |= MNT_RELATIME;
 
+	if (dx_parse_tag(data_page, &tag, 1, &mnt_flags, &flags)) {
+		/* FIXME: bind and re-mounts get the tag flag? */
+		if (flags & (MS_BIND|MS_REMOUNT))
+			flags |= MS_TAGID;
+	}
+
 	/* Separate the per-mountpoint flags */
 	if (flags & MS_NOSUID)
 		mnt_flags |= MNT_NOSUID;
@@ -1948,6 +2016,8 @@ long do_mount(char *dev_name, char *dir_
 	if (flags & MS_RDONLY)
 		mnt_flags |= MNT_READONLY;
 
+	if (!capable(CAP_SYS_ADMIN))
+		mnt_flags |= MNT_NODEV;
 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
 		   MS_STRICTATIME);
@@ -1964,9 +2034,9 @@ long do_mount(char *dev_name, char *dir_
 
 	if (flags & MS_REMOUNT)
 		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
-				    data_page);
+				    data_page, tag);
 	else if (flags & MS_BIND)
-		retval = do_loopback(&path, dev_name, flags & MS_REC);
+		retval = do_loopback(&path, dev_name, tag, flags, mnt_flags);
 	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
 		retval = do_change_type(&path, flags);
 	else if (flags & MS_MOVE)
@@ -2045,6 +2115,7 @@ static struct mnt_namespace *dup_mnt_ns(
 		q = next_mnt(q, new_ns->root);
 	}
 	up_write(&namespace_sem);
+	atomic_inc(&vs_global_mnt_ns);
 
 	if (rootmnt)
 		mntput(rootmnt);
@@ -2189,9 +2260,10 @@ SYSCALL_DEFINE2(pivot_root, const char _
 	down_write(&namespace_sem);
 	mutex_lock(&old.dentry->d_inode->i_mutex);
 	error = -EINVAL;
-	if (IS_MNT_SHARED(old.mnt) ||
+	if ((IS_MNT_SHARED(old.mnt) ||
 		IS_MNT_SHARED(new.mnt->mnt_parent) ||
-		IS_MNT_SHARED(root.mnt->mnt_parent))
+		IS_MNT_SHARED(root.mnt->mnt_parent)) &&
+		!vx_flags(VXF_STATE_SETUP, 0))
 		goto out2;
 	if (!check_mnt(root.mnt))
 		goto out2;
@@ -2327,6 +2399,7 @@ void put_mnt_ns(struct mnt_namespace *ns
 	spin_unlock(&vfsmount_lock);
 	up_write(&namespace_sem);
 	release_mounts(&umount_list);
+	atomic_dec(&vs_global_mnt_ns);
 	kfree(ns);
 }
 EXPORT_SYMBOL(put_mnt_ns);
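
The fs/namespace.c changes fall into three groups: mount-related capability checks go through vx_capable() with the matching VXC_* context capability, do_mount() parses a context tag out of the mount data via dx_parse_tag() and forces MNT_NODEV for mounters without CAP_SYS_ADMIN, and the /proc mount listings are filtered per guest (with vs_global_mnt_ns counting mount namespaces). The three show handlers share one visibility test; condensed (mnt_visible_to_caller() is a hypothetical name, the calls are the ones used above):

#include <linux/mount.h>
#include <linux/vs_base.h>
#include <linux/vs_context.h>

/* sketch only: the filter each show_* handler applies before printing a line */
static int mnt_visible_to_caller(struct vfsmount *mnt)
{
	if (vx_flags(VXF_HIDE_MOUNT, 0))
		return 0;	/* guest asked to hide all mounts */
	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
		return 0;	/* outside the caller's root and not a watcher */
	return 1;
}
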
diff -NurpP --minimal linux-2.6.32.17/fs/nfs/client.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/client.c
--- linux-2.6.32.17/fs/nfs/client.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/client.c	2010-05-21 13:20:19.000000000 +0200
@@ -738,6 +738,9 @@ static int nfs_init_server_rpcclient(str
 	if (server->flags & NFS_MOUNT_SOFT)
 		server->client->cl_softrtry = 1;
 
+	server->client->cl_tag = 0;
+	if (server->flags & NFS_MOUNT_TAGGED)
+		server->client->cl_tag = 1;
 	return 0;
 }
 
@@ -909,6 +912,10 @@ static void nfs_server_set_fsinfo(struct
 		server->acdirmin = server->acdirmax = 0;
 	}
 
+	/* FIXME: needs fsinfo
+	if (server->flags & NFS_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;	*/
+
 	server->maxfilesize = fsinfo->maxfilesize;
 
 	/* We're airborne Set socket buffersize */
diff -NurpP --minimal linux-2.6.32.17/fs/nfs/dir.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/dir.c
--- linux-2.6.32.17/fs/nfs/dir.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/dir.c	2010-05-21 13:20:19.000000000 +0200
@@ -33,6 +33,7 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/sched.h>
+#include <linux/vs_tag.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -951,6 +952,7 @@ static struct dentry *nfs_lookup(struct 
 	if (IS_ERR(res))
 		goto out_unblock_sillyrename;
 
+	dx_propagate_tag(nd, inode);
 no_entry:
 	res = d_materialise_unique(dentry, inode);
 	if (res != NULL) {
diff -NurpP --minimal linux-2.6.32.17/fs/nfs/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/inode.c
--- linux-2.6.32.17/fs/nfs/inode.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/inode.c	2010-04-05 20:56:22.000000000 +0200
@@ -36,6 +36,7 @@
 #include <linux/vfs.h>
 #include <linux/inet.h>
 #include <linux/nfs_xdr.h>
+#include <linux/vs_tag.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -279,6 +280,8 @@ nfs_fhget(struct super_block *sb, struct
 	if (inode->i_state & I_NEW) {
 		struct nfs_inode *nfsi = NFS_I(inode);
 		unsigned long now = jiffies;
+		uid_t uid;
+		gid_t gid;
 
 		/* We set i_ino for the few things that still rely on it,
 		 * such as stat(2) */
@@ -327,8 +330,8 @@ nfs_fhget(struct super_block *sb, struct
 		nfsi->change_attr = 0;
 		inode->i_size = 0;
 		inode->i_nlink = 0;
-		inode->i_uid = -2;
-		inode->i_gid = -2;
+		uid = -2;
+		gid = -2;
 		inode->i_blocks = 0;
 		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
 
@@ -365,13 +368,13 @@ nfs_fhget(struct super_block *sb, struct
 		else if (nfs_server_capable(inode, NFS_CAP_NLINK))
 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_OWNER)
-			inode->i_uid = fattr->uid;
+			uid = fattr->uid;
 		else if (nfs_server_capable(inode, NFS_CAP_OWNER))
 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
 				| NFS_INO_INVALID_ACCESS
 				| NFS_INO_INVALID_ACL;
 		if (fattr->valid & NFS_ATTR_FATTR_GROUP)
-			inode->i_gid = fattr->gid;
+			gid = fattr->gid;
 		else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
 				| NFS_INO_INVALID_ACCESS
@@ -384,6 +387,11 @@ nfs_fhget(struct super_block *sb, struct
 			 */
 			inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
 		}
+		inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+		inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+		inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
+				/* maybe fattr->xid someday */
+
 		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
 		nfsi->attrtimeo_timestamp = now;
 		nfsi->access_cache = RB_ROOT;
@@ -496,6 +504,8 @@ void nfs_setattr_update_inode(struct ino
 			inode->i_uid = attr->ia_uid;
 		if ((attr->ia_valid & ATTR_GID) != 0)
 			inode->i_gid = attr->ia_gid;
+		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+			inode->i_tag = attr->ia_tag;
 		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
 		spin_unlock(&inode->i_lock);
 	}
@@ -906,6 +916,9 @@ static int nfs_check_inode_attributes(st
 	struct nfs_inode *nfsi = NFS_I(inode);
 	loff_t cur_size, new_isize;
 	unsigned long invalid = 0;
+	uid_t uid;
+	gid_t gid;
+	tag_t tag;
 
 
 	/* Has the inode gone and changed behind our back? */
@@ -929,13 +942,18 @@ static int nfs_check_inode_attributes(st
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
 	}
 
+	uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
+	gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
+	tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
+
 	/* Have any file permissions changed? */
 	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
-	if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && inode->i_uid != fattr->uid)
+	if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && uid != fattr->uid)
 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
-	if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && inode->i_gid != fattr->gid)
+	if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && gid != fattr->gid)
 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+		/* maybe check for tag too? */
 
 	/* Has the link count changed? */
 	if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
@@ -1150,6 +1168,9 @@ static int nfs_update_inode(struct inode
 	unsigned long invalid = 0;
 	unsigned long now = jiffies;
 	unsigned long save_cache_validity;
+	uid_t uid;
+	gid_t gid;
+	tag_t tag;
 
 	dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
 			__func__, inode->i_sb->s_id, inode->i_ino,
@@ -1252,6 +1273,9 @@ static int nfs_update_inode(struct inode
 				| NFS_INO_REVAL_PAGECACHE
 				| NFS_INO_REVAL_FORCED);
 
+	uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
+	gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
+	tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
 
 	if (fattr->valid & NFS_ATTR_FATTR_ATIME)
 		memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
@@ -1271,9 +1295,9 @@ static int nfs_update_inode(struct inode
 				| NFS_INO_REVAL_FORCED);
 
 	if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
-		if (inode->i_uid != fattr->uid) {
+		if (uid != fattr->uid) {
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
-			inode->i_uid = fattr->uid;
+			uid = fattr->uid;
 		}
 	} else if (server->caps & NFS_CAP_OWNER)
 		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
@@ -1282,9 +1306,9 @@ static int nfs_update_inode(struct inode
 				| NFS_INO_REVAL_FORCED);
 
 	if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
-		if (inode->i_gid != fattr->gid) {
+		if (gid != fattr->gid) {
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
-			inode->i_gid = fattr->gid;
+			gid = fattr->gid;
 		}
 	} else if (server->caps & NFS_CAP_OWNER_GROUP)
 		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
@@ -1292,6 +1316,10 @@ static int nfs_update_inode(struct inode
 				| NFS_INO_INVALID_ACL
 				| NFS_INO_REVAL_FORCED);
 
+	inode->i_uid = uid;
+	inode->i_gid = gid;
+	inode->i_tag = tag;
+
 	if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
 		if (inode->i_nlink != fattr->nlink) {
 			invalid |= NFS_INO_INVALID_ATTR;
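
In fs/nfs/inode.c the client no longer copies fattr->uid and fattr->gid straight into the inode; the wire values are staged in locals and pushed through the INOTAG_* macros so that, on tagged mounts, the context tag is split out of the identity and kept in i_tag. The same triple appears in nfs_fhget(), nfs_check_inode_attributes() and nfs_update_inode(); condensed into a hypothetical helper (i_tag and the macros come from the rest of the patch, via <linux/vs_tag.h>):

#include <linux/fs.h>
#include <linux/vs_tag.h>

/* sketch only: derive the tagged inode identity from the wire uid/gid */
static void nfs_set_tagged_owner(struct inode *inode, uid_t uid, gid_t gid)
{
	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
}
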
diff -NurpP --minimal linux-2.6.32.17/fs/nfs/nfs3xdr.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/nfs3xdr.c
--- linux-2.6.32.17/fs/nfs/nfs3xdr.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/nfs3xdr.c	2009-12-03 20:04:56.000000000 +0100
@@ -21,6 +21,7 @@
 #include <linux/nfs3.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfsacl.h>
+#include <linux/vs_tag.h>
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
@@ -176,7 +177,7 @@ xdr_decode_fattr(__be32 *p, struct nfs_f
 }
 
 static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
+xdr_encode_sattr(__be32 *p, struct iattr *attr, int tag)
 {
 	if (attr->ia_valid & ATTR_MODE) {
 		*p++ = xdr_one;
@@ -184,15 +185,17 @@ xdr_encode_sattr(__be32 *p, struct iattr
 	} else {
 		*p++ = xdr_zero;
 	}
-	if (attr->ia_valid & ATTR_UID) {
+	if (attr->ia_valid & ATTR_UID ||
+		(tag && (attr->ia_valid & ATTR_TAG))) {
 		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_uid);
+		*p++ = htonl(TAGINO_UID(tag, attr->ia_uid, attr->ia_tag));
 	} else {
 		*p++ = xdr_zero;
 	}
-	if (attr->ia_valid & ATTR_GID) {
+	if (attr->ia_valid & ATTR_GID ||
+		(tag && (attr->ia_valid & ATTR_TAG))) {
 		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_gid);
+		*p++ = htonl(TAGINO_GID(tag, attr->ia_gid, attr->ia_tag));
 	} else {
 		*p++ = xdr_zero;
 	}
@@ -279,7 +282,8 @@ static int
 nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args)
 {
 	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_sattr(p, args->sattr);
+	p = xdr_encode_sattr(p, args->sattr,
+		req->rq_task->tk_client->cl_tag);
 	*p++ = htonl(args->guard);
 	if (args->guard)
 		p = xdr_encode_time3(p, &args->guardtime);
@@ -384,7 +388,8 @@ nfs3_xdr_createargs(struct rpc_rqst *req
 		*p++ = args->verifier[0];
 		*p++ = args->verifier[1];
 	} else
-		p = xdr_encode_sattr(p, args->sattr);
+		p = xdr_encode_sattr(p, args->sattr,
+			req->rq_task->tk_client->cl_tag);
 
 	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
 	return 0;
@@ -398,7 +403,8 @@ nfs3_xdr_mkdirargs(struct rpc_rqst *req,
 {
 	p = xdr_encode_fhandle(p, args->fh);
 	p = xdr_encode_array(p, args->name, args->len);
-	p = xdr_encode_sattr(p, args->sattr);
+	p = xdr_encode_sattr(p, args->sattr,
+		req->rq_task->tk_client->cl_tag);
 	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
 	return 0;
 }
@@ -411,7 +417,8 @@ nfs3_xdr_symlinkargs(struct rpc_rqst *re
 {
 	p = xdr_encode_fhandle(p, args->fromfh);
 	p = xdr_encode_array(p, args->fromname, args->fromlen);
-	p = xdr_encode_sattr(p, args->sattr);
+	p = xdr_encode_sattr(p, args->sattr,
+		req->rq_task->tk_client->cl_tag);
 	*p++ = htonl(args->pathlen);
 	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
 
@@ -429,7 +436,8 @@ nfs3_xdr_mknodargs(struct rpc_rqst *req,
 	p = xdr_encode_fhandle(p, args->fh);
 	p = xdr_encode_array(p, args->name, args->len);
 	*p++ = htonl(args->type);
-	p = xdr_encode_sattr(p, args->sattr);
+	p = xdr_encode_sattr(p, args->sattr,
+		req->rq_task->tk_client->cl_tag);
 	if (args->type == NF3CHR || args->type == NF3BLK) {
 		*p++ = htonl(MAJOR(args->rdev));
 		*p++ = htonl(MINOR(args->rdev));
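
On the encode side, xdr_encode_sattr() gains a tag argument: when the rpc client was created from a "tag" mount (cl_tag, set in nfs_init_server_rpcclient() further up), the context tag is folded back into the uid/gid that goes on the wire with TAGINO_UID/TAGINO_GID, and a tag-only change still forces the owner fields to be sent. The gate, reduced to one line (wire_owner_uid() is hypothetical; cl_tag and ia_tag are fields added elsewhere in the patch):

#include <linux/fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/vs_tag.h>

/* sketch only: fold the tag into the wire uid iff the mount was tagged */
static inline __be32 wire_owner_uid(struct rpc_clnt *clnt, struct iattr *attr)
{
	return htonl(TAGINO_UID(clnt->cl_tag, attr->ia_uid, attr->ia_tag));
}
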
diff -NurpP --minimal linux-2.6.32.17/fs/nfs/nfsroot.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/nfsroot.c
--- linux-2.6.32.17/fs/nfs/nfsroot.c	2009-09-10 15:26:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/nfsroot.c	2009-12-03 20:04:56.000000000 +0100
@@ -122,12 +122,12 @@ static int mount_port __initdata = 0;		/
 enum {
 	/* Options that take integer arguments */
 	Opt_port, Opt_rsize, Opt_wsize, Opt_timeo, Opt_retrans, Opt_acregmin,
-	Opt_acregmax, Opt_acdirmin, Opt_acdirmax,
+	Opt_acregmax, Opt_acdirmin, Opt_acdirmax, Opt_tagid,
 	/* Options that take no arguments */
 	Opt_soft, Opt_hard, Opt_intr,
 	Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac, 
 	Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp,
-	Opt_acl, Opt_noacl,
+	Opt_acl, Opt_noacl, Opt_tag, Opt_notag,
 	/* Error token */
 	Opt_err
 };
@@ -164,6 +164,9 @@ static const match_table_t tokens __init
 	{Opt_tcp, "tcp"},
 	{Opt_acl, "acl"},
 	{Opt_noacl, "noacl"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL}
 	
 };
@@ -275,6 +278,20 @@ static int __init root_nfs_parse(char *n
 			case Opt_noacl:
 				nfs_data.flags |= NFS_MOUNT_NOACL;
 				break;
+#ifndef CONFIG_TAGGING_NONE
+			case Opt_tag:
+				nfs_data.flags |= NFS_MOUNT_TAGGED;
+				break;
+			case Opt_notag:
+				nfs_data.flags &= ~NFS_MOUNT_TAGGED;
+				break;
+#endif
+#ifdef CONFIG_PROPAGATE
+			case Opt_tagid:
+				/* use args[0] */
+				nfs_data.flags |= NFS_MOUNT_TAGGED;
+				break;
+#endif
 			default:
 				printk(KERN_WARNING "Root-NFS: unknown "
 					"option: %s\n", p);
diff -NurpP --minimal linux-2.6.32.17/fs/nfs/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/super.c
--- linux-2.6.32.17/fs/nfs/super.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfs/super.c	2010-08-09 18:06:32.000000000 +0200
@@ -53,6 +53,7 @@
 #include <linux/nfs_xdr.h>
 #include <linux/magic.h>
 #include <linux/parser.h>
+#include <linux/vs_tag.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -570,6 +571,7 @@ static void nfs_show_mount_options(struc
 		{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
 		{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
 		{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+		{ NFS_MOUNT_TAGGED, ",tag", "" },
 		{ 0, NULL, NULL }
 	};
 	const struct proc_nfs_info *nfs_infop;
diff -NurpP --minimal linux-2.6.32.17/fs/nfsd/auth.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/auth.c
--- linux-2.6.32.17/fs/nfsd/auth.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/auth.c	2009-12-03 20:04:56.000000000 +0100
@@ -10,6 +10,7 @@
 #include <linux/sunrpc/svcauth.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/export.h>
+#include <linux/vs_tag.h>
 #include "auth.h"
 
 int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
@@ -44,6 +45,9 @@ int nfsd_setuser(struct svc_rqst *rqstp,
 
 	new->fsuid = rqstp->rq_cred.cr_uid;
 	new->fsgid = rqstp->rq_cred.cr_gid;
+	/* FIXME: this desperately needs a tag :)
+	new->xid = (xid_t)INOTAG_TAG(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid, 0);
+			*/
 
 	rqgi = rqstp->rq_cred.cr_group_info;
 
diff -NurpP --minimal linux-2.6.32.17/fs/nfsd/nfs3xdr.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/nfs3xdr.c
--- linux-2.6.32.17/fs/nfsd/nfs3xdr.c	2009-12-03 20:02:52.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/nfs3xdr.c	2009-12-03 20:04:56.000000000 +0100
@@ -21,6 +21,7 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/xdr3.h>
+#include <linux/vs_tag.h>
 #include "auth.h"
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
@@ -108,6 +109,8 @@ static __be32 *
 decode_sattr3(__be32 *p, struct iattr *iap)
 {
 	u32	tmp;
+	uid_t	uid = 0;
+	gid_t	gid = 0;
 
 	iap->ia_valid = 0;
 
@@ -117,12 +120,15 @@ decode_sattr3(__be32 *p, struct iattr *i
 	}
 	if (*p++) {
 		iap->ia_valid |= ATTR_UID;
-		iap->ia_uid = ntohl(*p++);
+		uid = ntohl(*p++);
 	}
 	if (*p++) {
 		iap->ia_valid |= ATTR_GID;
-		iap->ia_gid = ntohl(*p++);
+		gid = ntohl(*p++);
 	}
+	iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
+	iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
+	iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
 	if (*p++) {
 		u64	newsize;
 
@@ -178,8 +184,12 @@ encode_fattr3(struct svc_rqst *rqstp, __
 	*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
 	*p++ = htonl((u32) stat->mode);
 	*p++ = htonl((u32) stat->nlink);
-	*p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
-	*p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
+	*p++ = htonl((u32) nfsd_ruid(rqstp,
+		TAGINO_UID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
+		stat->uid, stat->tag)));
+	*p++ = htonl((u32) nfsd_rgid(rqstp,
+		TAGINO_GID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
+		stat->gid, stat->tag)));
 	if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
 		p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
 	} else {
diff -NurpP --minimal linux-2.6.32.17/fs/nfsd/nfs4xdr.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/nfs4xdr.c
--- linux-2.6.32.17/fs/nfsd/nfs4xdr.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/nfs4xdr.c	2010-05-21 13:20:19.000000000 +0200
@@ -57,6 +57,7 @@
 #include <linux/nfs4_acl.h>
 #include <linux/sunrpc/gss_api.h>
 #include <linux/sunrpc/svcauth_gss.h>
+#include <linux/vs_tag.h>
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
@@ -2050,14 +2051,18 @@ out_acl:
 		WRITE32(stat.nlink);
 	}
 	if (bmval1 & FATTR4_WORD1_OWNER) {
-		status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
+		status = nfsd4_encode_user(rqstp,
+			TAGINO_UID(DX_TAG(dentry->d_inode),
+			stat.uid, stat.tag), &p, &buflen);
 		if (status == nfserr_resource)
 			goto out_resource;
 		if (status)
 			goto out;
 	}
 	if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
-		status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
+		status = nfsd4_encode_group(rqstp,
+			TAGINO_GID(DX_TAG(dentry->d_inode),
+			stat.gid, stat.tag), &p, &buflen);
 		if (status == nfserr_resource)
 			goto out_resource;
 		if (status)
diff -NurpP --minimal linux-2.6.32.17/fs/nfsd/nfsxdr.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/nfsxdr.c
--- linux-2.6.32.17/fs/nfsd/nfsxdr.c	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/nfsd/nfsxdr.c	2009-12-03 20:04:56.000000000 +0100
@@ -15,6 +15,7 @@
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/xdr.h>
 #include <linux/mm.h>
+#include <linux/vs_tag.h>
 #include "auth.h"
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
@@ -98,6 +99,8 @@ static __be32 *
 decode_sattr(__be32 *p, struct iattr *iap)
 {
 	u32	tmp, tmp1;
+	uid_t	uid = 0;
+	gid_t	gid = 0;
 
 	iap->ia_valid = 0;
 
@@ -111,12 +114,15 @@ decode_sattr(__be32 *p, struct iattr *ia
 	}
 	if ((tmp = ntohl(*p++)) != (u32)-1) {
 		iap->ia_valid |= ATTR_UID;
-		iap->ia_uid = tmp;
+		uid = tmp;
 	}
 	if ((tmp = ntohl(*p++)) != (u32)-1) {
 		iap->ia_valid |= ATTR_GID;
-		iap->ia_gid = tmp;
+		gid = tmp;
 	}
+	iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
+	iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
+	iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
 	if ((tmp = ntohl(*p++)) != (u32)-1) {
 		iap->ia_valid |= ATTR_SIZE;
 		iap->ia_size = tmp;
@@ -161,8 +167,10 @@ encode_fattr(struct svc_rqst *rqstp, __b
 	*p++ = htonl(nfs_ftypes[type >> 12]);
 	*p++ = htonl((u32) stat->mode);
 	*p++ = htonl((u32) stat->nlink);
-	*p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
-	*p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
+	*p++ = htonl((u32) nfsd_ruid(rqstp,
+		TAGINO_UID(DX_TAG(dentry->d_inode), stat->uid, stat->tag)));
+	*p++ = htonl((u32) nfsd_rgid(rqstp,
+		TAGINO_GID(DX_TAG(dentry->d_inode), stat->gid, stat->tag)));
 
 	if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
 		*p++ = htonl(NFS_MAXPATHLEN);
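
The nfsd XDR hunks run the mapping in both directions: decode_sattr() and decode_sattr3() split the incoming uid/gid into ia_uid/ia_gid/ia_tag with INOTAG_* under the server-side DX_TAG_NFSD policy, while encode_fattr(), encode_fattr3() and the NFSv4 attribute encoder fold the tag back with TAGINO_UID/TAGINO_GID before the usual root-squash mapping. Encode side, condensed (encode_squashed_uid() is hypothetical; stat->tag is a kstat field added elsewhere in the patch):

#include <linux/stat.h>
#include <linux/sunrpc/svc.h>
#include <linux/vs_tag.h>
#include "auth.h"	/* nfsd_ruid()/nfsd_rgid() */

/* sketch only: merge the tag into the uid, then apply the export's id squashing */
static __be32 encode_squashed_uid(struct svc_rqst *rqstp,
				  struct inode *inode, struct kstat *stat)
{
	return htonl((u32) nfsd_ruid(rqstp,
		TAGINO_UID(DX_TAG(inode), stat->uid, stat->tag)));
}
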
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/dlm/dlmfs.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/dlm/dlmfs.c
--- linux-2.6.32.17/fs/ocfs2/dlm/dlmfs.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/dlm/dlmfs.c	2010-05-21 13:20:19.000000000 +0200
@@ -43,6 +43,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/backing-dev.h>
+#include <linux/vs_tag.h>
 
 #include <asm/uaccess.h>
 
@@ -342,6 +343,7 @@ static struct inode *dlmfs_get_root_inod
 		inode->i_mode = mode;
 		inode->i_uid = current_fsuid();
 		inode->i_gid = current_fsgid();
+		inode->i_tag = dx_current_fstag(sb);
 		inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inc_nlink(inode);
@@ -367,6 +369,7 @@ static struct inode *dlmfs_get_inode(str
 	inode->i_mode = mode;
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
+	inode->i_tag = dx_current_fstag(sb);
 	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/dlmglue.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/dlmglue.c
--- linux-2.6.32.17/fs/ocfs2/dlmglue.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/dlmglue.c	2009-12-03 20:04:56.000000000 +0100
@@ -1991,6 +1991,7 @@ static void __ocfs2_stuff_meta_lvb(struc
 	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
 	lvb->lvb_iuid      = cpu_to_be32(inode->i_uid);
 	lvb->lvb_igid      = cpu_to_be32(inode->i_gid);
+	lvb->lvb_itag      = cpu_to_be16(inode->i_tag);
 	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
 	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
 	lvb->lvb_iatime_packed  =
@@ -2045,6 +2046,7 @@ static void ocfs2_refresh_inode_from_lvb
 
 	inode->i_uid     = be32_to_cpu(lvb->lvb_iuid);
 	inode->i_gid     = be32_to_cpu(lvb->lvb_igid);
+	inode->i_tag     = be16_to_cpu(lvb->lvb_itag);
 	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
 	inode->i_nlink   = be16_to_cpu(lvb->lvb_inlink);
 	ocfs2_unpack_timespec(&inode->i_atime,
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/dlmglue.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/dlmglue.h
--- linux-2.6.32.17/fs/ocfs2/dlmglue.h	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/dlmglue.h	2009-12-03 20:04:56.000000000 +0100
@@ -46,7 +46,8 @@ struct ocfs2_meta_lvb {
 	__be16       lvb_inlink;
 	__be32       lvb_iattr;
 	__be32       lvb_igeneration;
-	__be32       lvb_reserved2;
+	__be16       lvb_itag;
+	__be16       lvb_reserved2;
 };
 
 #define OCFS2_QINFO_LVB_VERSION 1
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/file.c
--- linux-2.6.32.17/fs/ocfs2/file.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/file.c	2009-12-03 20:04:56.000000000 +0100
@@ -960,13 +960,15 @@ int ocfs2_setattr(struct dentry *dentry,
 		mlog(0, "uid change: %d\n", attr->ia_uid);
 	if (attr->ia_valid & ATTR_GID)
 		mlog(0, "gid change: %d\n", attr->ia_gid);
+	if (attr->ia_valid & ATTR_TAG)
+		mlog(0, "tag change: %d\n", attr->ia_tag);
 	if (attr->ia_valid & ATTR_SIZE)
 		mlog(0, "size change...\n");
 	if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
 		mlog(0, "time change...\n");
 
 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
-			   | ATTR_GID | ATTR_UID | ATTR_MODE)
+			   | ATTR_GID | ATTR_UID | ATTR_TAG | ATTR_MODE)
 	if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
 		mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
 		return 0;
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/inode.c
--- linux-2.6.32.17/fs/ocfs2/inode.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/inode.c	2010-05-21 13:20:19.000000000 +0200
@@ -29,6 +29,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 
 #include <asm/byteorder.h>
 
@@ -79,11 +80,13 @@ void ocfs2_set_inode_flags(struct inode 
 {
 	unsigned int flags = OCFS2_I(inode)->ip_attr;
 
-	inode->i_flags &= ~(S_IMMUTABLE |
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
 		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
 
 	if (flags & OCFS2_IMMUTABLE_FL)
 		inode->i_flags |= S_IMMUTABLE;
+	if (flags & OCFS2_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
 
 	if (flags & OCFS2_SYNC_FL)
 		inode->i_flags |= S_SYNC;
@@ -93,25 +96,44 @@ void ocfs2_set_inode_flags(struct inode 
 		inode->i_flags |= S_NOATIME;
 	if (flags & OCFS2_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & OCFS2_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & OCFS2_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 /* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */
 void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi)
 {
 	unsigned int flags = oi->vfs_inode.i_flags;
+	unsigned int vflags = oi->vfs_inode.i_vflags;
+
+	oi->ip_attr &= ~(OCFS2_SYNC_FL | OCFS2_APPEND_FL |
+			OCFS2_IMMUTABLE_FL | OCFS2_IXUNLINK_FL |
+			OCFS2_NOATIME_FL | OCFS2_DIRSYNC_FL |
+			OCFS2_BARRIER_FL | OCFS2_COW_FL);
+
+	if (flags & S_IMMUTABLE)
+		oi->ip_attr |= OCFS2_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		oi->ip_attr |= OCFS2_IXUNLINK_FL;
 
-	oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL|
-			OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL);
 	if (flags & S_SYNC)
 		oi->ip_attr |= OCFS2_SYNC_FL;
 	if (flags & S_APPEND)
 		oi->ip_attr |= OCFS2_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		oi->ip_attr |= OCFS2_IMMUTABLE_FL;
 	if (flags & S_NOATIME)
 		oi->ip_attr |= OCFS2_NOATIME_FL;
 	if (flags & S_DIRSYNC)
 		oi->ip_attr |= OCFS2_DIRSYNC_FL;
+
+	if (vflags & V_BARRIER)
+		oi->ip_attr |= OCFS2_BARRIER_FL;
+	if (vflags & V_COW)
+		oi->ip_attr |= OCFS2_COW_FL;
 }
 
 struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
@@ -246,6 +268,8 @@ void ocfs2_populate_inode(struct inode *
 	struct super_block *sb;
 	struct ocfs2_super *osb;
 	int use_plocks = 1;
+	uid_t uid;
+	gid_t gid;
 
 	mlog_entry("(0x%p, size:%llu)\n", inode,
 		   (unsigned long long)le64_to_cpu(fe->i_size));
@@ -277,8 +301,12 @@ void ocfs2_populate_inode(struct inode *
 	inode->i_generation = le32_to_cpu(fe->i_generation);
 	inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
 	inode->i_mode = le16_to_cpu(fe->i_mode);
-	inode->i_uid = le32_to_cpu(fe->i_uid);
-	inode->i_gid = le32_to_cpu(fe->i_gid);
+	uid = le32_to_cpu(fe->i_uid);
+	gid = le32_to_cpu(fe->i_gid);
+	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+		/* le16_to_cpu(raw_inode->i_raw_tag)i */ 0);
 
 	/* Fast symlinks will have i_size but no allocated clusters. */
 	if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/inode.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/inode.h
--- linux-2.6.32.17/fs/ocfs2/inode.h	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/inode.h	2009-12-03 20:04:56.000000000 +0100
@@ -150,6 +150,7 @@ struct buffer_head *ocfs2_bread(struct i
 
 void ocfs2_set_inode_flags(struct inode *inode);
 void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi);
+int ocfs2_sync_flags(struct inode *inode, int, int);
 
 static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode)
 {
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/ioctl.c
--- linux-2.6.32.17/fs/ocfs2/ioctl.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -42,7 +42,41 @@ static int ocfs2_get_inode_attr(struct i
 	return status;
 }
 
-static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
+int ocfs2_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct buffer_head *bh = NULL;
+	handle_t *handle = NULL;
+	int status;
+
+	status = ocfs2_inode_lock(inode, &bh, 1);
+	if (status < 0) {
+		mlog_errno(status);
+		return status;
+	}
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+	if (IS_ERR(handle)) {
+		status = PTR_ERR(handle);
+		mlog_errno(status);
+		goto bail_unlock;
+	}
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ocfs2_get_inode_flags(OCFS2_I(inode));
+
+	status = ocfs2_mark_inode_dirty(handle, inode, bh);
+	if (status < 0)
+		mlog_errno(status);
+
+	ocfs2_commit_trans(osb, handle);
+bail_unlock:
+	ocfs2_inode_unlock(inode, 1);
+	brelse(bh);
+	return status;
+}
+
+int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
 				unsigned mask)
 {
 	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
@@ -67,6 +101,11 @@ static int ocfs2_set_inode_attr(struct i
 	if (!S_ISDIR(inode->i_mode))
 		flags &= ~OCFS2_DIRSYNC_FL;
 
+	if (IS_BARRIER(inode)) {
+		vxwprintk_task(1, "messing with the barrier.");
+		goto bail_unlock;
+	}
+
 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
@@ -108,6 +147,7 @@ bail:
 	return status;
 }
 
+
 long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = filp->f_path.dentry->d_inode;
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/namei.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/namei.c
--- linux-2.6.32.17/fs/ocfs2/namei.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/namei.c	2009-12-03 20:04:56.000000000 +0100
@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 
 #define MLOG_MASK_PREFIX ML_NAMEI
 #include <cluster/masklog.h>
@@ -481,6 +482,7 @@ static int ocfs2_mknod_locked(struct ocf
 	u64 fe_blkno = 0;
 	u16 suballoc_bit;
 	u16 feat;
+	tag_t tag;
 
 	*new_fe_bh = NULL;
 
@@ -524,8 +526,11 @@ static int ocfs2_mknod_locked(struct ocf
 	fe->i_blkno = cpu_to_le64(fe_blkno);
 	fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
 	fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
-	fe->i_uid = cpu_to_le32(inode->i_uid);
-	fe->i_gid = cpu_to_le32(inode->i_gid);
+
+	tag = dx_current_fstag(osb->sb);
+	fe->i_uid = cpu_to_le32(TAGINO_UID(DX_TAG(inode), inode->i_uid, tag));
+	fe->i_gid = cpu_to_le32(TAGINO_GID(DX_TAG(inode), inode->i_gid, tag));
+	inode->i_tag = tag;
 	fe->i_mode = cpu_to_le16(inode->i_mode);
 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 		fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/ocfs2_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/ocfs2_fs.h
--- linux-2.6.32.17/fs/ocfs2/ocfs2_fs.h	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/ocfs2_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -231,18 +231,23 @@
 #define OCFS2_HAS_REFCOUNT_FL   (0x0010)
 
 /* Inode attributes, keep in sync with EXT2 */
-#define OCFS2_SECRM_FL		(0x00000001)	/* Secure deletion */
-#define OCFS2_UNRM_FL		(0x00000002)	/* Undelete */
-#define OCFS2_COMPR_FL		(0x00000004)	/* Compress file */
-#define OCFS2_SYNC_FL		(0x00000008)	/* Synchronous updates */
-#define OCFS2_IMMUTABLE_FL	(0x00000010)	/* Immutable file */
-#define OCFS2_APPEND_FL		(0x00000020)	/* writes to file may only append */
-#define OCFS2_NODUMP_FL		(0x00000040)	/* do not dump file */
-#define OCFS2_NOATIME_FL	(0x00000080)	/* do not update atime */
-#define OCFS2_DIRSYNC_FL	(0x00010000)	/* dirsync behaviour (directories only) */
+#define OCFS2_SECRM_FL		FS_SECRM_FL	/* Secure deletion */
+#define OCFS2_UNRM_FL		FS_UNRM_FL	/* Undelete */
+#define OCFS2_COMPR_FL		FS_COMPR_FL	/* Compress file */
+#define OCFS2_SYNC_FL		FS_SYNC_FL	/* Synchronous updates */
+#define OCFS2_IMMUTABLE_FL	FS_IMMUTABLE_FL	/* Immutable file */
+#define OCFS2_APPEND_FL		FS_APPEND_FL	/* writes to file may only append */
+#define OCFS2_NODUMP_FL		FS_NODUMP_FL	/* do not dump file */
+#define OCFS2_NOATIME_FL	FS_NOATIME_FL	/* do not update atime */
 
-#define OCFS2_FL_VISIBLE	(0x000100FF)	/* User visible flags */
-#define OCFS2_FL_MODIFIABLE	(0x000100FF)	/* User modifiable flags */
+#define OCFS2_DIRSYNC_FL	FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
+#define OCFS2_IXUNLINK_FL	FS_IXUNLINK_FL	/* Immutable invert on unlink */
+
+#define OCFS2_BARRIER_FL	FS_BARRIER_FL	/* Barrier for chroot() */
+#define OCFS2_COW_FL		FS_COW_FL	/* Copy on Write marker */
+
+#define OCFS2_FL_VISIBLE	(0x010300FF)	/* User visible flags */
+#define OCFS2_FL_MODIFIABLE	(0x010300FF)	/* User modifiable flags */
 
 /*
  * Extent record flags (e_node.leaf.flags)
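
The ocfs2 part wires the vserver inode flags into the on-disk attributes: ocfs2_set_inode_flags() and ocfs2_get_inode_flags() translate S_IXUNLINK and the i_vflags bits V_BARRIER/V_COW to OCFS2_IXUNLINK_FL, OCFS2_BARRIER_FL and OCFS2_COW_FL (now defined in terms of the generic FS_* values, as above), ocfs2_sync_flags() persists a flag change in one transaction, and ocfs2_set_inode_attr() refuses to touch barrier inodes. Outbound direction, condensed (vserver_bits_to_ip_attr() is hypothetical; i_vflags, S_IXUNLINK, V_BARRIER and V_COW are introduced elsewhere in the patch):

#include <linux/fs.h>

/* sketch only: fold the vserver-specific bits into the ocfs2 ip_attr word */
static unsigned int vserver_bits_to_ip_attr(const struct inode *inode)
{
	unsigned int attr = 0;

	if (inode->i_flags & S_IXUNLINK)
		attr |= OCFS2_IXUNLINK_FL;
	if (inode->i_vflags & V_BARRIER)
		attr |= OCFS2_BARRIER_FL;
	if (inode->i_vflags & V_COW)
		attr |= OCFS2_COW_FL;
	return attr;
}
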
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/ocfs2.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/ocfs2.h
--- linux-2.6.32.17/fs/ocfs2/ocfs2.h	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/ocfs2.h	2009-12-03 20:04:56.000000000 +0100
@@ -248,6 +248,7 @@ enum ocfs2_mount_options
 	OCFS2_MOUNT_POSIX_ACL = 1 << 8,	/* POSIX access control lists */
 	OCFS2_MOUNT_USRQUOTA = 1 << 9, /* We support user quotas */
 	OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */
+	OCFS2_MOUNT_TAGGED = 1 << 11, /* use tagging */
 };
 
 #define OCFS2_OSB_SOFT_RO			0x0001
diff -NurpP --minimal linux-2.6.32.17/fs/ocfs2/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/super.c
--- linux-2.6.32.17/fs/ocfs2/super.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/ocfs2/super.c	2009-12-03 20:04:56.000000000 +0100
@@ -173,6 +173,7 @@ enum {
 	Opt_noacl,
 	Opt_usrquota,
 	Opt_grpquota,
+	Opt_tag, Opt_notag, Opt_tagid,
 	Opt_err,
 };
 
@@ -199,6 +200,9 @@ static const match_table_t tokens = {
 	{Opt_noacl, "noacl"},
 	{Opt_usrquota, "usrquota"},
 	{Opt_grpquota, "grpquota"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL}
 };
 
@@ -605,6 +609,13 @@ static int ocfs2_remount(struct super_bl
 		goto out;
 	}
 
+	if ((osb->s_mount_opt & OCFS2_MOUNT_TAGGED) !=
+	    (parsed_options.mount_opt & OCFS2_MOUNT_TAGGED)) {
+		ret = -EINVAL;
+		mlog(ML_ERROR, "Cannot change tagging on remount\n");
+		goto out;
+	}
+
 	if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) !=
 	    (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
 		ret = -EINVAL;
@@ -1148,6 +1159,9 @@ static int ocfs2_fill_super(struct super
 
 	ocfs2_complete_mount_recovery(osb);
 
+	if (osb->s_mount_opt & OCFS2_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
+
 	if (ocfs2_mount_local(osb))
 		snprintf(nodestr, sizeof(nodestr), "local");
 	else
@@ -1426,6 +1440,20 @@ static int ocfs2_parse_options(struct su
 			printk(KERN_INFO "ocfs2 (no)acl options not supported\n");
 			break;
 #endif
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
+			break;
+		case Opt_notag:
+			mopt->mount_opt &= ~OCFS2_MOUNT_TAGGED;
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
+			break;
+#endif
 		default:
 			mlog(ML_ERROR,
 			     "Unrecognized mount option \"%s\" "
diff -NurpP --minimal linux-2.6.32.17/fs/open.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/open.c
--- linux-2.6.32.17/fs/open.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/open.c	2009-12-03 20:04:56.000000000 +0100
@@ -30,22 +30,30 @@
 #include <linux/audit.h>
 #include <linux/falloc.h>
 #include <linux/fs_struct.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_cowbl.h>
 
 int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	int retval = -ENODEV;
 
 	if (dentry) {
+		struct super_block *sb = dentry->d_sb;
+
 		retval = -ENOSYS;
-		if (dentry->d_sb->s_op->statfs) {
+		if (sb->s_op->statfs) {
 			memset(buf, 0, sizeof(*buf));
 			retval = security_sb_statfs(dentry);
 			if (retval)
 				return retval;
-			retval = dentry->d_sb->s_op->statfs(dentry, buf);
+			retval = sb->s_op->statfs(dentry, buf);
 			if (retval == 0 && buf->f_frsize == 0)
 				buf->f_frsize = buf->f_bsize;
 		}
+		if (!vx_check(0, VS_ADMIN|VS_WATCH))
+			vx_vsi_statfs(sb, buf);
 	}
 	return retval;
 }
@@ -640,6 +648,10 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
 	error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
 	if (error)
 		goto out;
+
+	error = cow_check_and_break(&path);
+	if (error)
+		goto dput_and_out;
 	inode = path.dentry->d_inode;
 
 	error = mnt_want_write(path.mnt);
@@ -673,11 +685,11 @@ static int chown_common(struct dentry * 
 	newattrs.ia_valid =  ATTR_CTIME;
 	if (user != (uid_t) -1) {
 		newattrs.ia_valid |= ATTR_UID;
-		newattrs.ia_uid = user;
+		newattrs.ia_uid = dx_map_uid(user);
 	}
 	if (group != (gid_t) -1) {
 		newattrs.ia_valid |= ATTR_GID;
-		newattrs.ia_gid = group;
+		newattrs.ia_gid = dx_map_gid(group);
 	}
 	if (!S_ISDIR(inode->i_mode))
 		newattrs.ia_valid |=
@@ -700,7 +712,11 @@ SYSCALL_DEFINE3(chown, const char __user
 	error = mnt_want_write(path.mnt);
 	if (error)
 		goto out_release;
-	error = chown_common(path.dentry, user, group);
+#ifdef CONFIG_VSERVER_COWBL
+	error = cow_check_and_break(&path);
+	if (!error)
+#endif
+		error = chown_common(path.dentry, user, group);
 	mnt_drop_write(path.mnt);
 out_release:
 	path_put(&path);
@@ -725,7 +741,11 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
 	error = mnt_want_write(path.mnt);
 	if (error)
 		goto out_release;
-	error = chown_common(path.dentry, user, group);
+#ifdef CONFIG_VSERVER_COWBL
+	error = cow_check_and_break(&path);
+	if (!error)
+#endif
+		error = chown_common(path.dentry, user, group);
 	mnt_drop_write(path.mnt);
 out_release:
 	path_put(&path);
@@ -744,7 +764,11 @@ SYSCALL_DEFINE3(lchown, const char __use
 	error = mnt_want_write(path.mnt);
 	if (error)
 		goto out_release;
-	error = chown_common(path.dentry, user, group);
+#ifdef CONFIG_VSERVER_COWBL
+	error = cow_check_and_break(&path);
+	if (!error)
+#endif
+		error = chown_common(path.dentry, user, group);
 	mnt_drop_write(path.mnt);
 out_release:
 	path_put(&path);
@@ -990,6 +1014,7 @@ static void __put_unused_fd(struct files
 	__FD_CLR(fd, fdt->open_fds);
 	if (fd < files->next_fd)
 		files->next_fd = fd;
+	vx_openfd_dec(fd);
 }
 
 void put_unused_fd(unsigned int fd)
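The copy-on-write break guard above is repeated verbatim in chown(2), fchownat(2) and lchown(2). A minimal sketch of a shared helper the three call sites could use, reusing only identifiers visible in the hunks above (illustrative, not part of the patch):

	static int chown_common_cow(struct path *path, uid_t user, gid_t group)
	{
		int error = 0;

	#ifdef CONFIG_VSERVER_COWBL
		/* break a copy-on-write link before the ownership change */
		error = cow_check_and_break(path);
	#endif
		if (!error)
			error = chown_common(path->dentry, user, group);
		return error;
	}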
diff -NurpP --minimal linux-2.6.32.17/fs/proc/array.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/array.c
--- linux-2.6.32.17/fs/proc/array.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/array.c	2010-05-27 19:23:00.000000000 +0200
@@ -82,6 +82,8 @@
 #include <linux/pid_namespace.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -138,8 +140,9 @@ static const char *task_state_array[] = 
 	"D (disk sleep)",	/*  2 */
 	"T (stopped)",		/*  4 */
 	"T (tracing stop)",	/*  8 */
-	"Z (zombie)",		/* 16 */
-	"X (dead)"		/* 32 */
+	"H (on hold)",		/* 16 */
+	"Z (zombie)",		/* 32 */
+	"X (dead)",		/* 64 */
 };
 
 static inline const char *get_task_state(struct task_struct *tsk)
@@ -166,6 +169,9 @@ static inline void task_state(struct seq
 	rcu_read_lock();
 	ppid = pid_alive(p) ?
 		task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
+	if (unlikely(vx_current_initpid(p->pid)))
+		ppid = 0;
+
 	tpid = 0;
 	if (pid_alive(p)) {
 		struct task_struct *tracer = tracehook_tracer_task(p);
@@ -281,7 +287,7 @@ static inline void task_sig(struct seq_f
 }
 
 static void render_cap_t(struct seq_file *m, const char *header,
-			kernel_cap_t *a)
+			struct vx_info *vxi, kernel_cap_t *a)
 {
 	unsigned __capi;
 
@@ -306,10 +312,11 @@ static inline void task_cap(struct seq_f
 	cap_bset	= cred->cap_bset;
 	rcu_read_unlock();
 
-	render_cap_t(m, "CapInh:\t", &cap_inheritable);
-	render_cap_t(m, "CapPrm:\t", &cap_permitted);
-	render_cap_t(m, "CapEff:\t", &cap_effective);
-	render_cap_t(m, "CapBnd:\t", &cap_bset);
+	/* FIXME: maybe move the p->vx_info masking to __task_cred() ? */
+	render_cap_t(m, "CapInh:\t", p->vx_info, &cap_inheritable);
+	render_cap_t(m, "CapPrm:\t", p->vx_info, &cap_permitted);
+	render_cap_t(m, "CapEff:\t", p->vx_info, &cap_effective);
+	render_cap_t(m, "CapBnd:\t", p->vx_info, &cap_bset);
 }
 
 static inline void task_context_switch_counts(struct seq_file *m,
@@ -321,6 +328,43 @@ static inline void task_context_switch_c
 			p->nivcsw);
 }
 
+
+int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
+			struct pid *pid, struct task_struct *task)
+{
+	seq_printf(m,	"Proxy:\t%p(%c)\n"
+			"Count:\t%u\n"
+			"uts:\t%p(%c)\n"
+			"ipc:\t%p(%c)\n"
+			"mnt:\t%p(%c)\n"
+			"pid:\t%p(%c)\n"
+			"net:\t%p(%c)\n",
+			task->nsproxy,
+			(task->nsproxy == init_task.nsproxy ? 'I' : '-'),
+			atomic_read(&task->nsproxy->count),
+			task->nsproxy->uts_ns,
+			(task->nsproxy->uts_ns == init_task.nsproxy->uts_ns ? 'I' : '-'),
+			task->nsproxy->ipc_ns,
+			(task->nsproxy->ipc_ns == init_task.nsproxy->ipc_ns ? 'I' : '-'),
+			task->nsproxy->mnt_ns,
+			(task->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns ? 'I' : '-'),
+			task->nsproxy->pid_ns,
+			(task->nsproxy->pid_ns == init_task.nsproxy->pid_ns ? 'I' : '-'),
+			task->nsproxy->net_ns,
+			(task->nsproxy->net_ns == init_task.nsproxy->net_ns ? 'I' : '-'));
+	return 0;
+}
+
+void task_vs_id(struct seq_file *m, struct task_struct *task)
+{
+	if (task_vx_flags(task, VXF_HIDE_VINFO, 0))
+		return;
+
+	seq_printf(m, "VxID: %d\n", vx_task_xid(task));
+	seq_printf(m, "NxID: %d\n", nx_task_nid(task));
+}
+
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
 {
@@ -336,6 +380,7 @@ int proc_pid_status(struct seq_file *m, 
 	task_sig(m, task);
 	task_cap(m, task);
 	cpuset_task_status_allowed(m, task);
+	task_vs_id(m, task);
 #if defined(CONFIG_S390)
 	task_show_regs(m, task);
 #endif
@@ -452,6 +497,17 @@ static int do_task_stat(struct seq_file 
 	/* convert nsec -> ticks */
 	start_time = nsec_to_clock_t(start_time);
 
+	/* fixup start time for virt uptime */
+	if (vx_flags(VXF_VIRT_UPTIME, 0)) {
+		unsigned long long bias =
+			current->vx_info->cvirt.bias_clock;
+
+		if (start_time > bias)
+			start_time -= bias;
+		else
+			start_time = 0;
+	}
+
 	seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \
 %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
 %lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
diff -NurpP --minimal linux-2.6.32.17/fs/proc/base.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/base.c
--- linux-2.6.32.17/fs/proc/base.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/base.c	2010-05-21 13:20:19.000000000 +0200
@@ -81,6 +81,8 @@
 #include <linux/elf.h>
 #include <linux/pid_namespace.h>
 #include <linux/fs_struct.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
 #include "internal.h"
 
 /* NOTE:
@@ -1048,12 +1050,17 @@ static ssize_t oom_adjust_write(struct f
 		return -ESRCH;
 	}
 
-	if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
+	if (oom_adjust < task->signal->oom_adj &&
+		!vx_capable(CAP_SYS_RESOURCE, VXC_OOM_ADJUST)) {
 		unlock_task_sighand(task, &flags);
 		put_task_struct(task);
 		return -EACCES;
 	}
 
+	/* prevent guest processes from circumventing the oom killer */
+	if (vx_current_xid() && (oom_adjust == OOM_DISABLE))
+		oom_adjust = OOM_ADJUST_MIN;
+
 	task->signal->oom_adj = oom_adjust;
 
 	unlock_task_sighand(task, &flags);
@@ -1093,7 +1100,7 @@ static ssize_t proc_loginuid_write(struc
 	ssize_t length;
 	uid_t loginuid;
 
-	if (!capable(CAP_AUDIT_CONTROL))
+	if (!vx_capable(CAP_AUDIT_CONTROL, VXC_AUDIT_CONTROL))
 		return -EPERM;
 
 	if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
@@ -1459,6 +1466,8 @@ static struct inode *proc_pid_make_inode
 		inode->i_gid = cred->egid;
 		rcu_read_unlock();
 	}
+	/* procfs is xid tagged */
+	inode->i_tag = (tag_t)vx_task_xid(task);
 	security_task_to_inode(task, inode);
 
 out:
@@ -2009,6 +2018,13 @@ static struct dentry *proc_pident_lookup
 	if (!task)
 		goto out_no_task;
 
+	/* TODO: maybe we can come up with a generic approach? */
+	if (task_vx_flags(task, VXF_HIDE_VINFO, 0) &&
+		(dentry->d_name.len == 5) &&
+		(!memcmp(dentry->d_name.name, "vinfo", 5) ||
+		!memcmp(dentry->d_name.name, "ninfo", 5)))
+		goto out;
+
 	/*
 	 * Yes, it does not scale. And it should not. Don't add
 	 * new entries into /proc/<tgid>/ without very good reasons.
@@ -2414,7 +2430,7 @@ out_iput:
 static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
 {
 	struct dentry *error;
-	struct task_struct *task = get_proc_task(dir);
+	struct task_struct *task = get_proc_task_real(dir);
 	const struct pid_entry *p, *last;
 
 	error = ERR_PTR(-ENOENT);
@@ -2504,6 +2520,9 @@ static int proc_pid_personality(struct s
 static const struct file_operations proc_task_operations;
 static const struct inode_operations proc_task_inode_operations;
 
+extern int proc_pid_vx_info(struct task_struct *, char *);
+extern int proc_pid_nx_info(struct task_struct *, char *);
+
 static const struct pid_entry tgid_base_stuff[] = {
 	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
 	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
@@ -2562,6 +2581,8 @@ static const struct pid_entry tgid_base_
 #ifdef CONFIG_CGROUPS
 	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
+	INF("vinfo",      S_IRUGO, proc_pid_vx_info),
+	INF("ninfo",	  S_IRUGO, proc_pid_nx_info),
 	INF("oom_score",  S_IRUGO, proc_oom_score),
 	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
 #ifdef CONFIG_AUDITSYSCALL
@@ -2577,6 +2598,7 @@ static const struct pid_entry tgid_base_
 #ifdef CONFIG_TASK_IO_ACCOUNTING
 	INF("io",	S_IRUGO, proc_tgid_io_accounting),
 #endif
+	ONE("nsproxy",	S_IRUGO, proc_pid_nsproxy),
 };
 
 static int proc_tgid_base_readdir(struct file * filp,
@@ -2768,7 +2790,7 @@ retry:
 	iter.task = NULL;
 	pid = find_ge_pid(iter.tgid, ns);
 	if (pid) {
-		iter.tgid = pid_nr_ns(pid, ns);
+		iter.tgid = pid_unmapped_nr_ns(pid, ns);
 		iter.task = pid_task(pid, PIDTYPE_PID);
 		/* What we to know is if the pid we have find is the
 		 * pid of a thread_group_leader.  Testing for task
@@ -2798,7 +2820,7 @@ static int proc_pid_fill_cache(struct fi
 	struct tgid_iter iter)
 {
 	char name[PROC_NUMBUF];
-	int len = snprintf(name, sizeof(name), "%d", iter.tgid);
+	int len = snprintf(name, sizeof(name), "%d", vx_map_tgid(iter.tgid));
 	return proc_fill_cache(filp, dirent, filldir, name, len,
 				proc_pid_instantiate, iter.task, NULL);
 }
@@ -2807,7 +2829,7 @@ static int proc_pid_fill_cache(struct fi
 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
 	unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
-	struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+	struct task_struct *reaper = get_proc_task_real(filp->f_path.dentry->d_inode);
 	struct tgid_iter iter;
 	struct pid_namespace *ns;
 
@@ -2827,6 +2849,8 @@ int proc_pid_readdir(struct file * filp,
 	     iter.task;
 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
 		filp->f_pos = iter.tgid + TGID_OFFSET;
+		if (!vx_proc_task_visible(iter.task))
+			continue;
 		if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
 			put_task_struct(iter.task);
 			goto out;
@@ -2973,6 +2997,8 @@ static struct dentry *proc_task_lookup(s
 	tid = name_to_int(dentry);
 	if (tid == ~0U)
 		goto out;
+	if (vx_current_initpid(tid))
+		goto out;
 
 	ns = dentry->d_sb->s_fs_info;
 	rcu_read_lock();
diff -NurpP --minimal linux-2.6.32.17/fs/proc/generic.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/generic.c
--- linux-2.6.32.17/fs/proc/generic.c	2009-06-11 17:13:07.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/generic.c	2009-12-03 20:04:56.000000000 +0100
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
+#include <linux/vserver/inode.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -425,6 +426,8 @@ struct dentry *proc_lookup_de(struct pro
 	for (de = de->subdir; de ; de = de->next) {
 		if (de->namelen != dentry->d_name.len)
 			continue;
+		if (!vx_hide_check(0, de->vx_flags))
+			continue;
 		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
 			unsigned int ino;
 
@@ -433,6 +436,8 @@ struct dentry *proc_lookup_de(struct pro
 			spin_unlock(&proc_subdir_lock);
 			error = -EINVAL;
 			inode = proc_get_inode(dir->i_sb, ino, de);
+			/* generic proc entries belong to the host */
+			inode->i_tag = 0;
 			goto out_unlock;
 		}
 	}
@@ -510,6 +515,8 @@ int proc_readdir_de(struct proc_dir_entr
 
 				/* filldir passes info to user space */
 				de_get(de);
+				if (!vx_hide_check(0, de->vx_flags))
+					goto skip;
 				spin_unlock(&proc_subdir_lock);
 				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
 					    de->low_ino, de->mode >> 12) < 0) {
@@ -517,6 +524,7 @@ int proc_readdir_de(struct proc_dir_entr
 					goto out;
 				}
 				spin_lock(&proc_subdir_lock);
+			skip:
 				filp->f_pos++;
 				next = de->next;
 				de_put(de);
@@ -631,6 +639,7 @@ static struct proc_dir_entry *__proc_cre
 	ent->nlink = nlink;
 	atomic_set(&ent->count, 1);
 	ent->pde_users = 0;
+	ent->vx_flags = IATTR_PROC_DEFAULT;
 	spin_lock_init(&ent->pde_unload_lock);
 	ent->pde_unload_completion = NULL;
 	INIT_LIST_HEAD(&ent->pde_openers);
@@ -654,7 +663,8 @@ struct proc_dir_entry *proc_symlink(cons
 				kfree(ent->data);
 				kfree(ent);
 				ent = NULL;
-			}
+			} else
+				ent->vx_flags = IATTR_PROC_SYMLINK;
 		} else {
 			kfree(ent);
 			ent = NULL;
diff -NurpP --minimal linux-2.6.32.17/fs/proc/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/inode.c
--- linux-2.6.32.17/fs/proc/inode.c	2009-06-11 17:13:07.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/inode.c	2009-12-03 20:04:56.000000000 +0100
@@ -459,6 +459,8 @@ struct inode *proc_get_inode(struct supe
 			inode->i_uid = de->uid;
 			inode->i_gid = de->gid;
 		}
+		if (de->vx_flags)
+			PROC_I(inode)->vx_flags = de->vx_flags;
 		if (de->size)
 			inode->i_size = de->size;
 		if (de->nlink)
diff -NurpP --minimal linux-2.6.32.17/fs/proc/internal.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/internal.h
--- linux-2.6.32.17/fs/proc/internal.h	2009-09-10 15:26:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/internal.h	2009-12-03 20:04:56.000000000 +0100
@@ -10,6 +10,7 @@
  */
 
 #include <linux/proc_fs.h>
+#include <linux/vs_pid.h>
 
 extern struct proc_dir_entry proc_root;
 #ifdef CONFIG_PROC_SYSCTL
@@ -51,6 +52,9 @@ extern int proc_pid_status(struct seq_fi
 				struct pid *pid, struct task_struct *task);
 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 				struct pid *pid, struct task_struct *task);
+extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
+				struct pid *pid, struct task_struct *task);
+
 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
 
 extern const struct file_operations proc_maps_operations;
@@ -70,11 +74,16 @@ static inline struct pid *proc_pid(struc
 	return PROC_I(inode)->pid;
 }
 
-static inline struct task_struct *get_proc_task(struct inode *inode)
+static inline struct task_struct *get_proc_task_real(struct inode *inode)
 {
 	return get_pid_task(proc_pid(inode), PIDTYPE_PID);
 }
 
+static inline struct task_struct *get_proc_task(struct inode *inode)
+{
+	return vx_get_proc_task(inode, proc_pid(inode));
+}
+
 static inline int proc_fd(struct inode *inode)
 {
 	return PROC_I(inode)->fd;
diff -NurpP --minimal linux-2.6.32.17/fs/proc/loadavg.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/loadavg.c
--- linux-2.6.32.17/fs/proc/loadavg.c	2009-09-10 15:26:23.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/loadavg.c	2009-12-03 20:04:56.000000000 +0100
@@ -12,15 +12,27 @@
 
 static int loadavg_proc_show(struct seq_file *m, void *v)
 {
+	unsigned long running;
+	unsigned int threads;
 	unsigned long avnrun[3];
 
 	get_avenrun(avnrun, FIXED_1/200, 0);
 
+	if (vx_flags(VXF_VIRT_LOAD, 0)) {
+		struct vx_info *vxi = current_vx_info();
+
+		running = atomic_read(&vxi->cvirt.nr_running);
+		threads = atomic_read(&vxi->cvirt.nr_threads);
+	} else {
+		running = nr_running();
+		threads = nr_threads;
+	}
+
 	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
 		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
 		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
-		nr_running(), nr_threads,
+		running, threads,
 		task_active_pid_ns(current)->last_pid);
 	return 0;
 }
diff -NurpP --minimal linux-2.6.32.17/fs/proc/meminfo.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/meminfo.c
--- linux-2.6.32.17/fs/proc/meminfo.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/meminfo.c	2010-02-05 00:31:48.000000000 +0100
@@ -39,7 +39,8 @@ static int meminfo_proc_show(struct seq_
 	allowed = ((totalram_pages - hugetlb_total_pages())
 		* sysctl_overcommit_ratio / 100) + total_swap_pages;
 
-	cached = global_page_state(NR_FILE_PAGES) -
+	cached = vx_flags(VXF_VIRT_MEM, 0) ?
+		vx_vsi_cached(&i) : global_page_state(NR_FILE_PAGES) -
 			total_swapcache_pages - i.bufferram;
 	if (cached < 0)
 		cached = 0;
diff -NurpP --minimal linux-2.6.32.17/fs/proc/root.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/root.c
--- linux-2.6.32.17/fs/proc/root.c	2009-06-11 17:13:07.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/root.c	2009-12-03 20:04:56.000000000 +0100
@@ -18,9 +18,14 @@
 #include <linux/bitops.h>
 #include <linux/mount.h>
 #include <linux/pid_namespace.h>
+#include <linux/vserver/inode.h>
 
 #include "internal.h"
 
+struct proc_dir_entry *proc_virtual;
+
+extern void proc_vx_init(void);
+
 static int proc_test_super(struct super_block *sb, void *data)
 {
 	return sb->s_fs_info == data;
@@ -136,6 +141,7 @@ void __init proc_root_init(void)
 #endif
 	proc_mkdir("bus", NULL);
 	proc_sys_init();
+	proc_vx_init();
 }
 
 static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat
@@ -203,6 +209,7 @@ struct proc_dir_entry proc_root = {
 	.proc_iops	= &proc_root_inode_operations, 
 	.proc_fops	= &proc_root_operations,
 	.parent		= &proc_root,
+	.vx_flags	= IATTR_ADMIN | IATTR_WATCH,
 };
 
 int pid_ns_prepare_proc(struct pid_namespace *ns)
diff -NurpP --minimal linux-2.6.32.17/fs/proc/uptime.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/uptime.c
--- linux-2.6.32.17/fs/proc/uptime.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/proc/uptime.c	2009-12-03 20:04:56.000000000 +0100
@@ -4,22 +4,22 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/time.h>
-#include <linux/kernel_stat.h>
+#include <linux/vserver/cvirt.h>
 #include <asm/cputime.h>
 
 static int uptime_proc_show(struct seq_file *m, void *v)
 {
 	struct timespec uptime;
 	struct timespec idle;
-	int i;
-	cputime_t idletime = cputime_zero;
-
-	for_each_possible_cpu(i)
-		idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+	cputime_t idletime = cputime_add(init_task.utime, init_task.stime);
 
 	do_posix_clock_monotonic_gettime(&uptime);
 	monotonic_to_bootbased(&uptime);
 	cputime_to_timespec(idletime, &idle);
+
+	if (vx_flags(VXF_VIRT_UPTIME, 0))
+		vx_vsi_uptime(&uptime, &idle);
+
 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
 			(unsigned long) uptime.tv_sec,
 			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
diff -NurpP --minimal linux-2.6.32.17/fs/quota/quota.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/quota/quota.c
--- linux-2.6.32.17/fs/quota/quota.c	2009-09-10 15:26:24.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/quota/quota.c	2009-12-03 20:04:56.000000000 +0100
@@ -18,6 +18,7 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/types.h>
+#include <linux/vs_context.h>
 
 /* Check validity of generic quotactl commands */
 static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -83,11 +84,11 @@ static int generic_quotactl_valid(struct
 	if (cmd == Q_GETQUOTA) {
 		if (((type == USRQUOTA && current_euid() != id) ||
 		     (type == GRPQUOTA && !in_egroup_p(id))) &&
-		    !capable(CAP_SYS_ADMIN))
+		    !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
 			return -EPERM;
 	}
 	else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
-		if (!capable(CAP_SYS_ADMIN))
+		if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
 			return -EPERM;
 
 	return 0;
@@ -135,10 +136,10 @@ static int xqm_quotactl_valid(struct sup
 	if (cmd == Q_XGETQUOTA) {
 		if (((type == XQM_USRQUOTA && current_euid() != id) ||
 		     (type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
-		     !capable(CAP_SYS_ADMIN))
+		     !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
 			return -EPERM;
 	} else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
-		if (!capable(CAP_SYS_ADMIN))
+		if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
 			return -EPERM;
 	}
 
@@ -351,6 +352,46 @@ static int do_quotactl(struct super_bloc
 	return 0;
 }
 
+#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
+
+#include <linux/vroot.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/vserver/debug.h>
+
+static vroot_grb_func *vroot_get_real_bdev = NULL;
+
+static DEFINE_SPINLOCK(vroot_grb_lock);
+
+int register_vroot_grb(vroot_grb_func *func) {
+	int ret = -EBUSY;
+
+	spin_lock(&vroot_grb_lock);
+	if (!vroot_get_real_bdev) {
+		vroot_get_real_bdev = func;
+		ret = 0;
+	}
+	spin_unlock(&vroot_grb_lock);
+	return ret;
+}
+EXPORT_SYMBOL(register_vroot_grb);
+
+int unregister_vroot_grb(vroot_grb_func *func) {
+	int ret = -EINVAL;
+
+	spin_lock(&vroot_grb_lock);
+	if (vroot_get_real_bdev) {
+		vroot_get_real_bdev = NULL;
+		ret = 0;
+	}
+	spin_unlock(&vroot_grb_lock);
+	return ret;
+}
+EXPORT_SYMBOL(unregister_vroot_grb);
+
+#endif
+
 /*
  * look up a superblock on which quota ops will be performed
  * - use the name of a block device to find the superblock thereon
@@ -368,6 +409,22 @@ static struct super_block *quotactl_bloc
 	putname(tmp);
 	if (IS_ERR(bdev))
 		return ERR_CAST(bdev);
+#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
+	if (bdev && bdev->bd_inode &&
+			imajor(bdev->bd_inode) == VROOT_MAJOR) {
+		struct block_device *bdnew = (void *)-EINVAL;
+
+		if (vroot_get_real_bdev)
+			bdnew = vroot_get_real_bdev(bdev);
+		else
+			vxdprintk(VXD_CBIT(misc, 0),
+					"vroot_get_real_bdev not set");
+		bdput(bdev);
+		if (IS_ERR(bdnew))
+			return ERR_PTR(PTR_ERR(bdnew));
+		bdev = bdnew;
+	}
+#endif
 	sb = get_super(bdev);
 	bdput(bdev);
 	if (!sb)
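For illustration, a sketch of how a vroot driver would plug into the hook registered above; the callback shape (struct block_device * in, struct block_device * out) follows its use in quotactl_block(), while the module boilerplate is assumed:

	/* illustrative module glue, not part of the patch */
	static struct block_device *example_get_real_bdev(struct block_device *bdev)
	{
		/* resolve the vroot device to the real block device it proxies */
		return ERR_PTR(-ENODEV);	/* placeholder */
	}

	static int __init example_init(void)
	{
		/* fails with -EBUSY if another provider is already registered */
		return register_vroot_grb(&example_get_real_bdev);
	}

	static void __exit example_exit(void)
	{
		unregister_vroot_grb(&example_get_real_bdev);
	}

	module_init(example_init);
	module_exit(example_exit);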
diff -NurpP --minimal linux-2.6.32.17/fs/reiserfs/file.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/file.c
--- linux-2.6.32.17/fs/reiserfs/file.c	2009-06-11 17:13:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/file.c	2009-12-03 20:04:56.000000000 +0100
@@ -307,4 +307,5 @@ const struct inode_operations reiserfs_f
 	.listxattr = reiserfs_listxattr,
 	.removexattr = reiserfs_removexattr,
 	.permission = reiserfs_permission,
+	.sync_flags = reiserfs_sync_flags,
 };
diff -NurpP --minimal linux-2.6.32.17/fs/reiserfs/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/inode.c
--- linux-2.6.32.17/fs/reiserfs/inode.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/inode.c	2010-01-26 20:35:35.000000000 +0100
@@ -18,6 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/quotaops.h>
 #include <linux/swap.h>
+#include <linux/vs_tag.h>
 
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
@@ -1117,6 +1118,8 @@ static void init_inode(struct inode *ino
 	struct buffer_head *bh;
 	struct item_head *ih;
 	__u32 rdev;
+	uid_t uid;
+	gid_t gid;
 	//int version = ITEM_VERSION_1;
 
 	bh = PATH_PLAST_BUFFER(path);
@@ -1138,12 +1141,13 @@ static void init_inode(struct inode *ino
 		    (struct stat_data_v1 *)B_I_PITEM(bh, ih);
 		unsigned long blocks;
 
+		uid = sd_v1_uid(sd);
+		gid = sd_v1_gid(sd);
+
 		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
 		set_inode_sd_version(inode, STAT_DATA_V1);
 		inode->i_mode = sd_v1_mode(sd);
 		inode->i_nlink = sd_v1_nlink(sd);
-		inode->i_uid = sd_v1_uid(sd);
-		inode->i_gid = sd_v1_gid(sd);
 		inode->i_size = sd_v1_size(sd);
 		inode->i_atime.tv_sec = sd_v1_atime(sd);
 		inode->i_mtime.tv_sec = sd_v1_mtime(sd);
@@ -1185,11 +1189,12 @@ static void init_inode(struct inode *ino
 		// (directories and symlinks)
 		struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih);
 
+		uid    = sd_v2_uid(sd);
+		gid    = sd_v2_gid(sd);
+
 		inode->i_mode = sd_v2_mode(sd);
 		inode->i_nlink = sd_v2_nlink(sd);
-		inode->i_uid = sd_v2_uid(sd);
 		inode->i_size = sd_v2_size(sd);
-		inode->i_gid = sd_v2_gid(sd);
 		inode->i_mtime.tv_sec = sd_v2_mtime(sd);
 		inode->i_atime.tv_sec = sd_v2_atime(sd);
 		inode->i_ctime.tv_sec = sd_v2_ctime(sd);
@@ -1219,6 +1224,10 @@ static void init_inode(struct inode *ino
 		sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
 	}
 
+	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
+
 	pathrelse(path);
 	if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &reiserfs_file_inode_operations;
@@ -1241,13 +1250,15 @@ static void init_inode(struct inode *ino
 static void inode2sd(void *sd, struct inode *inode, loff_t size)
 {
 	struct stat_data *sd_v2 = (struct stat_data *)sd;
+	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
 	__u16 flags;
 
+	set_sd_v2_uid(sd_v2, uid);
+	set_sd_v2_gid(sd_v2, gid);
 	set_sd_v2_mode(sd_v2, inode->i_mode);
 	set_sd_v2_nlink(sd_v2, inode->i_nlink);
-	set_sd_v2_uid(sd_v2, inode->i_uid);
 	set_sd_v2_size(sd_v2, size);
-	set_sd_v2_gid(sd_v2, inode->i_gid);
 	set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
 	set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
 	set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
@@ -2839,14 +2850,19 @@ int reiserfs_commit_write(struct file *f
 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
 {
 	if (reiserfs_attrs(inode->i_sb)) {
-		if (sd_attrs & REISERFS_SYNC_FL)
-			inode->i_flags |= S_SYNC;
-		else
-			inode->i_flags &= ~S_SYNC;
 		if (sd_attrs & REISERFS_IMMUTABLE_FL)
 			inode->i_flags |= S_IMMUTABLE;
 		else
 			inode->i_flags &= ~S_IMMUTABLE;
+		if (sd_attrs & REISERFS_IXUNLINK_FL)
+			inode->i_flags |= S_IXUNLINK;
+		else
+			inode->i_flags &= ~S_IXUNLINK;
+
+		if (sd_attrs & REISERFS_SYNC_FL)
+			inode->i_flags |= S_SYNC;
+		else
+			inode->i_flags &= ~S_SYNC;
 		if (sd_attrs & REISERFS_APPEND_FL)
 			inode->i_flags |= S_APPEND;
 		else
@@ -2859,6 +2875,15 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs,
 			REISERFS_I(inode)->i_flags |= i_nopack_mask;
 		else
 			REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
+
+		if (sd_attrs & REISERFS_BARRIER_FL)
+			inode->i_vflags |= V_BARRIER;
+		else
+			inode->i_vflags &= ~V_BARRIER;
+		if (sd_attrs & REISERFS_COW_FL)
+			inode->i_vflags |= V_COW;
+		else
+			inode->i_vflags &= ~V_COW;
 	}
 }
 
@@ -2869,6 +2894,11 @@ void i_attrs_to_sd_attrs(struct inode *i
 			*sd_attrs |= REISERFS_IMMUTABLE_FL;
 		else
 			*sd_attrs &= ~REISERFS_IMMUTABLE_FL;
+		if (inode->i_flags & S_IXUNLINK)
+			*sd_attrs |= REISERFS_IXUNLINK_FL;
+		else
+			*sd_attrs &= ~REISERFS_IXUNLINK_FL;
+
 		if (inode->i_flags & S_SYNC)
 			*sd_attrs |= REISERFS_SYNC_FL;
 		else
@@ -2881,6 +2911,15 @@ void i_attrs_to_sd_attrs(struct inode *i
 			*sd_attrs |= REISERFS_NOTAIL_FL;
 		else
 			*sd_attrs &= ~REISERFS_NOTAIL_FL;
+
+		if (inode->i_vflags & V_BARRIER)
+			*sd_attrs |= REISERFS_BARRIER_FL;
+		else
+			*sd_attrs &= ~REISERFS_BARRIER_FL;
+		if (inode->i_vflags & V_COW)
+			*sd_attrs |= REISERFS_COW_FL;
+		else
+			*sd_attrs &= ~REISERFS_COW_FL;
 	}
 }
 
@@ -3101,9 +3140,11 @@ int reiserfs_setattr(struct dentry *dent
 	}
 
 	error = inode_change_ok(inode, attr);
+
 	if (!error) {
 		if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-		    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+		    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+		    (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
 			error = reiserfs_chown_xattrs(inode, attr);
 
 			if (!error) {
@@ -3133,6 +3174,9 @@ int reiserfs_setattr(struct dentry *dent
 					inode->i_uid = attr->ia_uid;
 				if (attr->ia_valid & ATTR_GID)
 					inode->i_gid = attr->ia_gid;
+				if ((attr->ia_valid & ATTR_TAG) &&
+					IS_TAGGED(inode))
+					inode->i_tag = attr->ia_tag;
 				mark_inode_dirty(inode);
 				error =
 				    journal_end(&th, inode->i_sb, jbegin_count);
diff -NurpP --minimal linux-2.6.32.17/fs/reiserfs/ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/ioctl.c
--- linux-2.6.32.17/fs/reiserfs/ioctl.c	2009-06-11 17:13:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -7,11 +7,27 @@
 #include <linux/mount.h>
 #include <linux/reiserfs_fs.h>
 #include <linux/time.h>
+#include <linux/mount.h>
 #include <asm/uaccess.h>
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
 #include <linux/compat.h>
 
+
+int reiserfs_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	__u16 sd_attrs = 0;
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+
+	i_attrs_to_sd_attrs(inode, &sd_attrs);
+	REISERFS_I(inode)->i_attrs = sd_attrs;
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return 0;
+}
+
 /*
 ** reiserfs_ioctl - handler for ioctl for inode
 ** supported commands:
@@ -23,7 +39,7 @@
 int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
 		   unsigned long arg)
 {
-	unsigned int flags;
+	unsigned int flags, oldflags;
 	int err = 0;
 
 	switch (cmd) {
@@ -43,6 +59,7 @@ int reiserfs_ioctl(struct inode *inode, 
 
 		flags = REISERFS_I(inode)->i_attrs;
 		i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
+		flags &= REISERFS_FL_USER_VISIBLE;
 		return put_user(flags, (int __user *)arg);
 	case REISERFS_IOC_SETFLAGS:{
 			if (!reiserfs_attrs(inode->i_sb))
@@ -60,6 +77,10 @@ int reiserfs_ioctl(struct inode *inode, 
 				err = -EFAULT;
 				goto setflags_out;
 			}
+			if (IS_BARRIER(inode)) {
+				vxwprintk_task(1, "messing with the barrier.");
+				return -EACCES;
+			}
 			/*
 			 * Is it quota file? Do not allow user to mess with it
 			 */
@@ -84,6 +105,10 @@ int reiserfs_ioctl(struct inode *inode, 
 					goto setflags_out;
 				}
 			}
+
+			oldflags = REISERFS_I(inode)->i_attrs;
+			flags &= REISERFS_FL_USER_MODIFIABLE;
+			flags |= oldflags & ~REISERFS_FL_USER_MODIFIABLE;
 			sd_attrs_to_i_attrs(flags, inode);
 			REISERFS_I(inode)->i_attrs = flags;
 			inode->i_ctime = CURRENT_TIME_SEC;
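The REISERFS_IOC_SETFLAGS path above applies the usual mask discipline: only REISERFS_FL_USER_MODIFIABLE bits are taken from user space, everything else is carried over from the previous attributes. The merge, condensed into one helper for clarity (illustrative, not part of the patch):

	static unsigned int merge_user_flags(unsigned int oldflags, unsigned int flags)
	{
		/* accept only the user-modifiable bits ... */
		flags &= REISERFS_FL_USER_MODIFIABLE;
		/* ... and preserve every kernel-owned bit as it was */
		flags |= oldflags & ~REISERFS_FL_USER_MODIFIABLE;
		return flags;
	}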
diff -NurpP --minimal linux-2.6.32.17/fs/reiserfs/namei.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/namei.c
--- linux-2.6.32.17/fs/reiserfs/namei.c	2009-06-11 17:13:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/namei.c	2009-12-03 20:04:56.000000000 +0100
@@ -17,6 +17,7 @@
 #include <linux/reiserfs_acl.h>
 #include <linux/reiserfs_xattr.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 
 #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; }
 #define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i);
@@ -354,6 +355,7 @@ static struct dentry *reiserfs_lookup(st
 	if (retval == IO_ERROR) {
 		return ERR_PTR(-EIO);
 	}
+	dx_propagate_tag(nd, inode);
 
 	return d_splice_alias(inode, dentry);
 }
@@ -570,6 +572,7 @@ static int new_inode_init(struct inode *
 	} else {
 		inode->i_gid = current_fsgid();
 	}
+	inode->i_tag = dx_current_fstag(inode->i_sb);
 	vfs_dq_init(inode);
 	return 0;
 }
@@ -1515,6 +1518,7 @@ const struct inode_operations reiserfs_d
 	.listxattr = reiserfs_listxattr,
 	.removexattr = reiserfs_removexattr,
 	.permission = reiserfs_permission,
+	.sync_flags = reiserfs_sync_flags,
 };
 
 /*
diff -NurpP --minimal linux-2.6.32.17/fs/reiserfs/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/super.c
--- linux-2.6.32.17/fs/reiserfs/super.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/super.c	2009-12-03 20:04:56.000000000 +0100
@@ -884,6 +884,14 @@ static int reiserfs_parse_options(struct
 		{"user_xattr",.setmask = 1 << REISERFS_UNSUPPORTED_OPT},
 		{"nouser_xattr",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT},
 #endif
+#ifndef CONFIG_TAGGING_NONE
+		{"tagxid",.setmask = 1 << REISERFS_TAGGED},
+		{"tag",.setmask = 1 << REISERFS_TAGGED},
+		{"notag",.clrmask = 1 << REISERFS_TAGGED},
+#endif
+#ifdef CONFIG_PROPAGATE
+		{"tag",.arg_required = 'T',.values = NULL},
+#endif
 #ifdef CONFIG_REISERFS_FS_POSIX_ACL
 		{"acl",.setmask = 1 << REISERFS_POSIXACL},
 		{"noacl",.clrmask = 1 << REISERFS_POSIXACL},
@@ -1190,6 +1198,14 @@ static int reiserfs_remount(struct super
 	handle_quota_files(s, qf_names, &qfmt);
 #endif
 
+	if ((mount_options & (1 << REISERFS_TAGGED)) &&
+		!(s->s_flags & MS_TAGGED)) {
+		reiserfs_warning(s, "super-vs01",
+			"reiserfs: tagging not permitted on remount.");
+		err = -EINVAL;
+		goto out_err;
+	}
+
 	handle_attrs(s);
 
 	/* Add options that are safe here */
@@ -1652,6 +1668,10 @@ static int reiserfs_fill_super(struct su
 		goto error;
 	}
 
+	/* map mount option tagxid */
+	if (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TAGGED))
+		s->s_flags |= MS_TAGGED;
+
 	rs = SB_DISK_SUPER_BLOCK(s);
 	/* Let's do basic sanity check to verify that underlying device is not
 	   smaller than the filesystem. If the check fails then abort and scream,
diff -NurpP --minimal linux-2.6.32.17/fs/reiserfs/xattr.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/xattr.c
--- linux-2.6.32.17/fs/reiserfs/xattr.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/reiserfs/xattr.c	2010-05-21 13:20:19.000000000 +0200
@@ -39,6 +39,7 @@
 #include <linux/namei.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/mount.h>
 #include <linux/file.h>
 #include <linux/pagemap.h>
 #include <linux/xattr.h>
diff -NurpP --minimal linux-2.6.32.17/fs/stat.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/stat.c
--- linux-2.6.32.17/fs/stat.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/stat.c	2010-01-13 14:33:47.000000000 +0100
@@ -26,6 +26,7 @@ void generic_fillattr(struct inode *inod
 	stat->nlink = inode->i_nlink;
 	stat->uid = inode->i_uid;
 	stat->gid = inode->i_gid;
+	stat->tag = inode->i_tag;
 	stat->rdev = inode->i_rdev;
 	stat->atime = inode->i_atime;
 	stat->mtime = inode->i_mtime;
diff -NurpP --minimal linux-2.6.32.17/fs/super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/super.c
--- linux-2.6.32.17/fs/super.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/super.c	2010-02-12 10:59:55.000000000 +0100
@@ -37,6 +37,9 @@
 #include <linux/kobject.h>
 #include <linux/mutex.h>
 #include <linux/file.h>
+#include <linux/devpts_fs.h>
+#include <linux/proc_fs.h>
+#include <linux/vs_context.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -914,12 +917,18 @@ struct vfsmount *
 vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
 {
 	struct vfsmount *mnt;
+	struct super_block *sb;
 	char *secdata = NULL;
 	int error;
 
 	if (!type)
 		return ERR_PTR(-ENODEV);
 
+	error = -EPERM;
+	if ((type->fs_flags & FS_BINARY_MOUNTDATA) &&
+		!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT))
+		goto out;
+
 	error = -ENOMEM;
 	mnt = alloc_vfsmnt(name);
 	if (!mnt)
@@ -938,9 +947,17 @@ vfs_kern_mount(struct file_system_type *
 	error = type->get_sb(type, flags, name, data, mnt);
 	if (error < 0)
 		goto out_free_secdata;
-	BUG_ON(!mnt->mnt_sb);
 
- 	error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
+	sb = mnt->mnt_sb;
+	BUG_ON(!sb);
+
+	error = -EPERM;
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT) && !sb->s_bdev &&
+		(sb->s_magic != PROC_SUPER_MAGIC) &&
+		(sb->s_magic != DEVPTS_SUPER_MAGIC))
+		goto out_sb;
+
+	error = security_sb_kern_mount(sb, flags, secdata);
  	if (error)
  		goto out_sb;
 
diff -NurpP --minimal linux-2.6.32.17/fs/sysfs/mount.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/sysfs/mount.c
--- linux-2.6.32.17/fs/sysfs/mount.c	2009-06-11 17:13:08.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/sysfs/mount.c	2009-12-03 20:04:56.000000000 +0100
@@ -47,7 +47,7 @@ static int sysfs_fill_super(struct super
 
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-	sb->s_magic = SYSFS_MAGIC;
+	sb->s_magic = SYSFS_SUPER_MAGIC;
 	sb->s_op = &sysfs_ops;
 	sb->s_time_gran = 1;
 	sysfs_sb = sb;
diff -NurpP --minimal linux-2.6.32.17/fs/utimes.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/utimes.c
--- linux-2.6.32.17/fs/utimes.c	2009-03-24 14:22:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/utimes.c	2009-12-03 20:04:56.000000000 +0100
@@ -8,6 +8,8 @@
 #include <linux/stat.h>
 #include <linux/utime.h>
 #include <linux/syscalls.h>
+#include <linux/mount.h>
+#include <linux/vs_cowbl.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
diff -NurpP --minimal linux-2.6.32.17/fs/xattr.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xattr.c
--- linux-2.6.32.17/fs/xattr.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xattr.c	2010-03-13 21:50:00.000000000 +0100
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/fsnotify.h>
 #include <linux/audit.h>
+#include <linux/mount.h>
 #include <asm/uaccess.h>
 
 
@@ -49,7 +50,7 @@ xattr_permission(struct inode *inode, co
 	 * The trusted.* namespace can only be accessed by a privileged user.
 	 */
 	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
-		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
+		return (vx_capable(CAP_SYS_ADMIN, VXC_FS_TRUSTED) ? 0 : -EPERM);
 
 	/* In user.* namespace, only regular files and directories can have
 	 * extended attributes. For sticky directories, only the owner and
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_ioctl.c
--- linux-2.6.32.17/fs/xfs/linux-2.6/xfs_ioctl.c	2009-09-10 15:26:24.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_ioctl.c	2009-12-03 20:04:56.000000000 +0100
@@ -34,7 +34,6 @@
 #include "xfs_dir2_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_ioctl.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_rtalloc.h"
@@ -742,6 +741,10 @@ xfs_merge_ioc_xflags(
 		xflags |= XFS_XFLAG_IMMUTABLE;
 	else
 		xflags &= ~XFS_XFLAG_IMMUTABLE;
+	if (flags & FS_IXUNLINK_FL)
+		xflags |= XFS_XFLAG_IXUNLINK;
+	else
+		xflags &= ~XFS_XFLAG_IXUNLINK;
 	if (flags & FS_APPEND_FL)
 		xflags |= XFS_XFLAG_APPEND;
 	else
@@ -770,6 +773,8 @@ xfs_di2lxflags(
 
 	if (di_flags & XFS_DIFLAG_IMMUTABLE)
 		flags |= FS_IMMUTABLE_FL;
+	if (di_flags & XFS_DIFLAG_IXUNLINK)
+		flags |= FS_IXUNLINK_FL;
 	if (di_flags & XFS_DIFLAG_APPEND)
 		flags |= FS_APPEND_FL;
 	if (di_flags & XFS_DIFLAG_SYNC)
@@ -828,6 +833,8 @@ xfs_set_diflags(
 	di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
 	if (xflags & XFS_XFLAG_IMMUTABLE)
 		di_flags |= XFS_DIFLAG_IMMUTABLE;
+	if (xflags & XFS_XFLAG_IXUNLINK)
+		di_flags |= XFS_DIFLAG_IXUNLINK;
 	if (xflags & XFS_XFLAG_APPEND)
 		di_flags |= XFS_DIFLAG_APPEND;
 	if (xflags & XFS_XFLAG_SYNC)
@@ -870,6 +877,10 @@ xfs_diflags_to_linux(
 		inode->i_flags |= S_IMMUTABLE;
 	else
 		inode->i_flags &= ~S_IMMUTABLE;
+	if (xflags & XFS_XFLAG_IXUNLINK)
+		inode->i_flags |= S_IXUNLINK;
+	else
+		inode->i_flags &= ~S_IXUNLINK;
 	if (xflags & XFS_XFLAG_APPEND)
 		inode->i_flags |= S_APPEND;
 	else
@@ -1346,10 +1357,18 @@ xfs_file_ioctl(
 	case XFS_IOC_FSGETXATTRA:
 		return xfs_ioc_fsgetxattr(ip, 1, arg);
 	case XFS_IOC_FSSETXATTR:
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -XFS_ERROR(EACCES);
+		}
 		return xfs_ioc_fssetxattr(ip, filp, arg);
 	case XFS_IOC_GETXFLAGS:
 		return xfs_ioc_getxflags(ip, arg);
 	case XFS_IOC_SETXFLAGS:
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -XFS_ERROR(EACCES);
+		}
 		return xfs_ioc_setxflags(ip, filp, arg);
 
 	case XFS_IOC_FSSETDM: {
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/linux-2.6/xfs_ioctl.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_ioctl.h
--- linux-2.6.32.17/fs/xfs/linux-2.6/xfs_ioctl.h	2009-03-24 14:22:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_ioctl.h	2009-12-03 20:04:56.000000000 +0100
@@ -70,6 +70,12 @@ xfs_handle_to_dentry(
 	void __user		*uhandle,
 	u32			hlen);
 
+extern int
+xfs_sync_flags(
+	struct inode		*inode,
+	int			flags,
+	int			vflags);
+
 extern long
 xfs_file_ioctl(
 	struct file		*filp,
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_iops.c
--- linux-2.6.32.17/fs/xfs/linux-2.6/xfs_iops.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_iops.c	2010-04-28 10:19:35.000000000 +0200
@@ -36,6 +36,7 @@
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_ioctl.h"
 #include "xfs_bmap.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
@@ -55,6 +56,7 @@
 #include <linux/security.h>
 #include <linux/falloc.h>
 #include <linux/fiemap.h>
+#include <linux/vs_tag.h>
 
 /*
  * Bring the timestamps in the XFS inode uptodate.
@@ -495,6 +497,7 @@ xfs_vn_getattr(
 	stat->nlink = ip->i_d.di_nlink;
 	stat->uid = ip->i_d.di_uid;
 	stat->gid = ip->i_d.di_gid;
+	stat->tag = ip->i_d.di_tag;
 	stat->ino = ip->i_ino;
 	stat->atime = inode->i_atime;
 	stat->mtime = inode->i_mtime;
@@ -686,6 +689,7 @@ static const struct inode_operations xfs
 	.listxattr		= xfs_vn_listxattr,
 	.fallocate		= xfs_vn_fallocate,
 	.fiemap			= xfs_vn_fiemap,
+	.sync_flags		= xfs_sync_flags,
 };
 
 static const struct inode_operations xfs_dir_inode_operations = {
@@ -711,6 +715,7 @@ static const struct inode_operations xfs
 	.getxattr		= generic_getxattr,
 	.removexattr		= generic_removexattr,
 	.listxattr		= xfs_vn_listxattr,
+	.sync_flags		= xfs_sync_flags,
 };
 
 static const struct inode_operations xfs_dir_ci_inode_operations = {
@@ -760,6 +765,10 @@ xfs_diflags_to_iflags(
 		inode->i_flags |= S_IMMUTABLE;
 	else
 		inode->i_flags &= ~S_IMMUTABLE;
+	if (ip->i_d.di_flags & XFS_DIFLAG_IXUNLINK)
+		inode->i_flags |= S_IXUNLINK;
+	else
+		inode->i_flags &= ~S_IXUNLINK;
 	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
 		inode->i_flags |= S_APPEND;
 	else
@@ -772,6 +781,15 @@ xfs_diflags_to_iflags(
 		inode->i_flags |= S_NOATIME;
 	else
 		inode->i_flags &= ~S_NOATIME;
+
+	if (ip->i_d.di_vflags & XFS_DIVFLAG_BARRIER)
+		inode->i_vflags |= V_BARRIER;
+	else
+		inode->i_vflags &= ~V_BARRIER;
+	if (ip->i_d.di_vflags & XFS_DIVFLAG_COW)
+		inode->i_vflags |= V_COW;
+	else
+		inode->i_vflags &= ~V_COW;
 }
 
 /*
@@ -800,6 +818,7 @@ xfs_setup_inode(
 	inode->i_nlink	= ip->i_d.di_nlink;
 	inode->i_uid	= ip->i_d.di_uid;
 	inode->i_gid	= ip->i_d.di_gid;
+	inode->i_tag    = ip->i_d.di_tag;
 
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFBLK:
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/linux-2.6/xfs_linux.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_linux.h
--- linux-2.6.32.17/fs/xfs/linux-2.6/xfs_linux.h	2009-09-10 15:26:24.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_linux.h	2009-12-03 20:04:56.000000000 +0100
@@ -119,6 +119,7 @@
 
 #define current_cpu()		(raw_smp_processor_id())
 #define current_pid()		(current->pid)
+#define current_fstag(cred,vp)	(dx_current_fstag((vp)->i_sb))
 #define current_test_flags(f)	(current->flags & (f))
 #define current_set_flags_nested(sp, f)		\
 		(*(sp) = current->flags, current->flags |= (f))
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/linux-2.6/xfs_super.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_super.c
--- linux-2.6.32.17/fs/xfs/linux-2.6/xfs_super.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/linux-2.6/xfs_super.c	2010-05-21 13:20:19.000000000 +0200
@@ -117,6 +117,9 @@ mempool_t *xfs_ioend_pool;
 #define MNTOPT_DMAPI	"dmapi"		/* DMI enabled (DMAPI / XDSM) */
 #define MNTOPT_XDSM	"xdsm"		/* DMI enabled (DMAPI / XDSM) */
 #define MNTOPT_DMI	"dmi"		/* DMI enabled (DMAPI / XDSM) */
+#define MNTOPT_TAGXID	"tagxid"	/* context tagging for inodes */
+#define MNTOPT_TAGGED	"tag"		/* context tagging for inodes */
+#define MNTOPT_NOTAGTAG	"notag"		/* do not use context tagging */
 
 /*
  * Table driven mount option parser.
@@ -125,10 +128,14 @@ mempool_t *xfs_ioend_pool;
  * in the future, too.
  */
 enum {
+	Opt_tag, Opt_notag,
 	Opt_barrier, Opt_nobarrier, Opt_err
 };
 
 static const match_table_t tokens = {
+	{Opt_tag, "tagxid"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
 	{Opt_barrier, "barrier"},
 	{Opt_nobarrier, "nobarrier"},
 	{Opt_err, NULL}
@@ -382,6 +389,19 @@ xfs_parseargs(
 		} else if (!strcmp(this_char, "irixsgid")) {
 			cmn_err(CE_WARN,
 	"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
+#ifndef CONFIG_TAGGING_NONE
+		} else if (!strcmp(this_char, MNTOPT_TAGGED)) {
+			mp->m_flags |= XFS_MOUNT_TAGGED;
+		} else if (!strcmp(this_char, MNTOPT_NOTAGTAG)) {
+			mp->m_flags &= ~XFS_MOUNT_TAGGED;
+		} else if (!strcmp(this_char, MNTOPT_TAGXID)) {
+			mp->m_flags |= XFS_MOUNT_TAGGED;
+#endif
+#ifdef CONFIG_PROPAGATE
+		} else if (!strcmp(this_char, MNTOPT_TAGGED)) {
+			/* use value */
+			mp->m_flags |= XFS_MOUNT_TAGGED;
+#endif
 		} else {
 			cmn_err(CE_WARN,
 				"XFS: unknown mount option [%s].", this_char);
@@ -1295,6 +1315,16 @@ xfs_fs_remount(
 		case Opt_nobarrier:
 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
 			break;
+		case Opt_tag:
+			if (!(sb->s_flags & MS_TAGGED)) {
+				printk(KERN_INFO
+					"XFS: %s: tagging not permitted on remount.\n",
+					sb->s_id);
+				return -EINVAL;
+			}
+			break;
+		case Opt_notag:
+			break;
 		default:
 			/*
 			 * Logically we would return an error here to prevent
@@ -1530,6 +1560,9 @@ xfs_fs_fill_super(
 
 	XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
 
+	if (mp->m_flags & XFS_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
+
 	sb->s_magic = XFS_SB_MAGIC;
 	sb->s_blocksize = mp->m_sb.sb_blocksize;
 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_dinode.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_dinode.h
--- linux-2.6.32.17/fs/xfs/xfs_dinode.h	2009-06-11 17:13:09.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_dinode.h	2009-12-03 20:04:56.000000000 +0100
@@ -50,7 +50,9 @@ typedef struct xfs_dinode {
 	__be32		di_gid;		/* owner's group id */
 	__be32		di_nlink;	/* number of links to file */
 	__be16		di_projid;	/* owner's project id */
-	__u8		di_pad[8];	/* unused, zeroed space */
+	__be16		di_tag;		/* context tagging */
+	__be16		di_vflags;	/* vserver specific flags */
+	__u8		di_pad[4];	/* unused, zeroed space */
 	__be16		di_flushiter;	/* incremented on flush */
 	xfs_timestamp_t	di_atime;	/* time last accessed */
 	xfs_timestamp_t	di_mtime;	/* time last modified */
@@ -183,6 +185,8 @@ static inline void xfs_dinode_put_rdev(s
 #define XFS_DIFLAG_EXTSZINHERIT_BIT 12	/* inherit inode extent size */
 #define XFS_DIFLAG_NODEFRAG_BIT     13	/* do not reorganize/defragment */
 #define XFS_DIFLAG_FILESTREAM_BIT   14  /* use filestream allocator */
+#define XFS_DIFLAG_IXUNLINK_BIT     15	/* Immutable invert on unlink */
+
 #define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
 #define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
 #define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -198,6 +202,7 @@ static inline void xfs_dinode_put_rdev(s
 #define XFS_DIFLAG_EXTSZINHERIT  (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
 #define XFS_DIFLAG_NODEFRAG      (1 << XFS_DIFLAG_NODEFRAG_BIT)
 #define XFS_DIFLAG_FILESTREAM    (1 << XFS_DIFLAG_FILESTREAM_BIT)
+#define XFS_DIFLAG_IXUNLINK      (1 << XFS_DIFLAG_IXUNLINK_BIT)
 
 #ifdef CONFIG_XFS_RT
 #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
@@ -210,6 +215,10 @@ static inline void xfs_dinode_put_rdev(s
 	 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
 	 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
 	 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
-	 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
+	 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM | \
+	 XFS_DIFLAG_IXUNLINK)
+
+#define XFS_DIVFLAG_BARRIER	0x01
+#define XFS_DIVFLAG_COW		0x02
 
 #endif	/* __XFS_DINODE_H__ */
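The on-disk inode size is unchanged: di_tag and di_vflags are carved out of the former 8-byte pad (2 + 2 + 4 = 8), so existing dinodes stay layout-compatible. A compile-time check that could be dropped into any function in the XFS build to document this (illustrative only):

	/* di_tag + di_vflags + the remaining pad must still cover the old 8-byte pad */
	BUILD_BUG_ON(sizeof(__be16) + sizeof(__be16) + 4 != 8);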
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_fs.h
--- linux-2.6.32.17/fs/xfs/xfs_fs.h	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -67,6 +67,9 @@ struct fsxattr {
 #define XFS_XFLAG_EXTSZINHERIT	0x00001000	/* inherit inode extent size */
 #define XFS_XFLAG_NODEFRAG	0x00002000  	/* do not defragment */
 #define XFS_XFLAG_FILESTREAM	0x00004000	/* use filestream allocator */
+#define XFS_XFLAG_IXUNLINK	0x00008000	/* immutable invert on unlink */
+#define XFS_XFLAG_BARRIER	0x10000000	/* chroot() barrier */
+#define XFS_XFLAG_COW		0x20000000	/* copy on write mark */
 #define XFS_XFLAG_HASATTR	0x80000000	/* no DIFLAG for this	*/
 
 /*
@@ -292,7 +295,8 @@ typedef struct xfs_bstat {
 	__s32		bs_extents;	/* number of extents		*/
 	__u32		bs_gen;		/* generation count		*/
 	__u16		bs_projid;	/* project id			*/
-	unsigned char	bs_pad[14];	/* pad space, unused		*/
+	__u16		bs_tag;		/* context tagging		*/
+	unsigned char	bs_pad[12];	/* pad space, unused		*/
 	__u32		bs_dmevmask;	/* DMIG event mask		*/
 	__u16		bs_dmstate;	/* DMIG state info		*/
 	__u16		bs_aextents;	/* attribute number of extents	*/
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_ialloc.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_ialloc.c
--- linux-2.6.32.17/fs/xfs/xfs_ialloc.c	2009-12-03 20:02:53.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_ialloc.c	2009-12-03 20:04:56.000000000 +0100
@@ -41,7 +41,6 @@
 #include "xfs_error.h"
 #include "xfs_bmap.h"
 
-
 /*
  * Allocation group level functions.
  */
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_inode.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_inode.c
--- linux-2.6.32.17/fs/xfs/xfs_inode.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_inode.c	2010-04-28 10:19:35.000000000 +0200
@@ -249,6 +249,7 @@ xfs_inotobp(
 	return 0;
 }
 
+#include <linux/vs_tag.h>
 
 /*
  * This routine is called to map an inode to the buffer containing
@@ -654,15 +655,25 @@ xfs_iformat_btree(
 STATIC void
 xfs_dinode_from_disk(
 	xfs_icdinode_t		*to,
-	xfs_dinode_t		*from)
+	xfs_dinode_t		*from,
+	int tagged)
 {
+	uint32_t uid, gid, tag;
+
 	to->di_magic = be16_to_cpu(from->di_magic);
 	to->di_mode = be16_to_cpu(from->di_mode);
 	to->di_version = from ->di_version;
 	to->di_format = from->di_format;
 	to->di_onlink = be16_to_cpu(from->di_onlink);
-	to->di_uid = be32_to_cpu(from->di_uid);
-	to->di_gid = be32_to_cpu(from->di_gid);
+
+	uid = be32_to_cpu(from->di_uid);
+	gid = be32_to_cpu(from->di_gid);
+	tag = be16_to_cpu(from->di_tag);
+
+	to->di_uid = INOTAG_UID(tagged, uid, gid);
+	to->di_gid = INOTAG_GID(tagged, uid, gid);
+	to->di_tag = INOTAG_TAG(tagged, uid, gid, tag);
+
 	to->di_nlink = be32_to_cpu(from->di_nlink);
 	to->di_projid = be16_to_cpu(from->di_projid);
 	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
@@ -683,21 +694,26 @@ xfs_dinode_from_disk(
 	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
 	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
 	to->di_flags	= be16_to_cpu(from->di_flags);
+	to->di_vflags	= be16_to_cpu(from->di_vflags);
 	to->di_gen	= be32_to_cpu(from->di_gen);
 }
 
 void
 xfs_dinode_to_disk(
 	xfs_dinode_t		*to,
-	xfs_icdinode_t		*from)
+	xfs_icdinode_t		*from,
+	int tagged)
 {
 	to->di_magic = cpu_to_be16(from->di_magic);
 	to->di_mode = cpu_to_be16(from->di_mode);
 	to->di_version = from ->di_version;
 	to->di_format = from->di_format;
 	to->di_onlink = cpu_to_be16(from->di_onlink);
-	to->di_uid = cpu_to_be32(from->di_uid);
-	to->di_gid = cpu_to_be32(from->di_gid);
+
+	to->di_uid = cpu_to_be32(TAGINO_UID(tagged, from->di_uid, from->di_tag));
+	to->di_gid = cpu_to_be32(TAGINO_GID(tagged, from->di_gid, from->di_tag));
+	to->di_tag = cpu_to_be16(TAGINO_TAG(tagged, from->di_tag));
+
 	to->di_nlink = cpu_to_be32(from->di_nlink);
 	to->di_projid = cpu_to_be16(from->di_projid);
 	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
@@ -718,12 +734,14 @@ xfs_dinode_to_disk(
 	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
 	to->di_dmstate = cpu_to_be16(from->di_dmstate);
 	to->di_flags = cpu_to_be16(from->di_flags);
+	to->di_vflags = cpu_to_be16(from->di_vflags);
 	to->di_gen = cpu_to_be32(from->di_gen);
 }
 
 STATIC uint
 _xfs_dic2xflags(
-	__uint16_t		di_flags)
+	__uint16_t		di_flags,
+	__uint16_t		di_vflags)
 {
 	uint			flags = 0;
 
@@ -734,6 +752,8 @@ _xfs_dic2xflags(
 			flags |= XFS_XFLAG_PREALLOC;
 		if (di_flags & XFS_DIFLAG_IMMUTABLE)
 			flags |= XFS_XFLAG_IMMUTABLE;
+		if (di_flags & XFS_DIFLAG_IXUNLINK)
+			flags |= XFS_XFLAG_IXUNLINK;
 		if (di_flags & XFS_DIFLAG_APPEND)
 			flags |= XFS_XFLAG_APPEND;
 		if (di_flags & XFS_DIFLAG_SYNC)
@@ -758,6 +778,10 @@ _xfs_dic2xflags(
 			flags |= XFS_XFLAG_FILESTREAM;
 	}
 
+	if (di_vflags & XFS_DIVFLAG_BARRIER)
+		flags |= FS_BARRIER_FL;
+	if (di_vflags & XFS_DIVFLAG_COW)
+		flags |= FS_COW_FL;
 	return flags;
 }
 
@@ -767,7 +791,7 @@ xfs_ip2xflags(
 {
 	xfs_icdinode_t		*dic = &ip->i_d;
 
-	return _xfs_dic2xflags(dic->di_flags) |
+	return _xfs_dic2xflags(dic->di_flags, dic->di_vflags) |
 				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
 }
 
@@ -775,7 +799,8 @@ uint
 xfs_dic2xflags(
 	xfs_dinode_t		*dip)
 {
-	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
+	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags),
+				be16_to_cpu(dip->di_vflags)) |
 				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
 }
 
@@ -811,7 +836,6 @@ xfs_iread(
 	if (error)
 		return error;
 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
-
 	/*
 	 * If we got something that isn't an inode it means someone
 	 * (nfs or dmi) has a stale handle.
@@ -836,7 +860,8 @@ xfs_iread(
 	 * Otherwise, just get the truly permanent information.
 	 */
 	if (dip->di_mode) {
-		xfs_dinode_from_disk(&ip->i_d, dip);
+		xfs_dinode_from_disk(&ip->i_d, dip,
+			mp->m_flags & XFS_MOUNT_TAGGED);
 		error = xfs_iformat(ip, dip);
 		if (error)  {
 #ifdef DEBUG
@@ -1036,6 +1061,7 @@ xfs_ialloc(
 	ASSERT(ip->i_d.di_nlink == nlink);
 	ip->i_d.di_uid = current_fsuid();
 	ip->i_d.di_gid = current_fsgid();
+	ip->i_d.di_tag = current_fstag(cr, &ip->i_vnode);
 	ip->i_d.di_projid = prid;
 	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
 
@@ -1096,6 +1122,7 @@ xfs_ialloc(
 	ip->i_d.di_dmevmask = 0;
 	ip->i_d.di_dmstate = 0;
 	ip->i_d.di_flags = 0;
+	ip->i_d.di_vflags = 0;
 	flags = XFS_ILOG_CORE;
 	switch (mode & S_IFMT) {
 	case S_IFIFO:
@@ -2172,6 +2199,7 @@ xfs_ifree(
 	}
 	ip->i_d.di_mode = 0;		/* mark incore inode as free */
 	ip->i_d.di_flags = 0;
+	ip->i_d.di_vflags = 0;
 	ip->i_d.di_dmevmask = 0;
 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
 	ip->i_df.if_ext_max =
@@ -3152,7 +3180,8 @@ xfs_iflush_int(
 	 * because if the inode is dirty at all the core must
 	 * be.
 	 */
-	xfs_dinode_to_disk(dip, &ip->i_d);
+	xfs_dinode_to_disk(dip, &ip->i_d,
+		mp->m_flags & XFS_MOUNT_TAGGED);
 
 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_inode.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_inode.h
--- linux-2.6.32.17/fs/xfs/xfs_inode.h	2009-12-03 20:02:54.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_inode.h	2009-12-03 20:04:56.000000000 +0100
@@ -135,7 +135,9 @@ typedef struct xfs_icdinode {
 	__uint32_t	di_gid;		/* owner's group id */
 	__uint32_t	di_nlink;	/* number of links to file */
 	__uint16_t	di_projid;	/* owner's project id */
-	__uint8_t	di_pad[8];	/* unused, zeroed space */
+	__uint16_t	di_tag;		/* context tagging */
+	__uint16_t	di_vflags;	/* vserver specific flags */
+	__uint8_t	di_pad[4];	/* unused, zeroed space */
 	__uint16_t	di_flushiter;	/* incremented on flush */
 	xfs_ictimestamp_t di_atime;	/* time last accessed */
 	xfs_ictimestamp_t di_mtime;	/* time last modified */
@@ -569,7 +571,7 @@ int		xfs_itobp(struct xfs_mount *, struc
 int		xfs_iread(struct xfs_mount *, struct xfs_trans *,
 			  struct xfs_inode *, xfs_daddr_t, uint);
 void		xfs_dinode_to_disk(struct xfs_dinode *,
-				   struct xfs_icdinode *);
+				   struct xfs_icdinode *, int);
 void		xfs_idestroy_fork(struct xfs_inode *, int);
 void		xfs_idata_realloc(struct xfs_inode *, int, int);
 void		xfs_iroot_realloc(struct xfs_inode *, int, int);
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_itable.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_itable.c
--- linux-2.6.32.17/fs/xfs/xfs_itable.c	2009-12-03 20:02:54.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_itable.c	2009-12-03 20:04:56.000000000 +0100
@@ -84,6 +84,7 @@ xfs_bulkstat_one_iget(
 	buf->bs_mode = dic->di_mode;
 	buf->bs_uid = dic->di_uid;
 	buf->bs_gid = dic->di_gid;
+	buf->bs_tag = dic->di_tag;
 	buf->bs_size = dic->di_size;
 
 	/*
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_log_recover.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_log_recover.c
--- linux-2.6.32.17/fs/xfs/xfs_log_recover.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_log_recover.c	2010-04-28 10:19:35.000000000 +0200
@@ -2467,7 +2467,8 @@ xlog_recover_do_inode_trans(
 	}
 
 	/* The core is in in-core format */
-	xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
+	xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr,
+		mp->m_flags & XFS_MOUNT_TAGGED);
 
 	/* the rest is in on-disk format */
 	if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_mount.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_mount.h
--- linux-2.6.32.17/fs/xfs/xfs_mount.h	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_mount.h	2010-05-21 13:20:19.000000000 +0200
@@ -285,6 +285,7 @@ typedef struct xfs_mount {
 						   allocator */
 #define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */
 
+#define XFS_MOUNT_TAGGED	(1ULL << 31)	/* context tagging */
 
 /*
  * Default minimum read and write sizes.
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_vnodeops.c linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_vnodeops.c
--- linux-2.6.32.17/fs/xfs/xfs_vnodeops.c	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_vnodeops.c	2010-04-28 10:20:09.000000000 +0200
@@ -54,6 +54,80 @@
 #include "xfs_filestream.h"
 #include "xfs_vnodeops.h"
 
+
+STATIC void
+xfs_get_inode_flags(
+	xfs_inode_t	*ip)
+{
+	struct inode 	*inode = VFS_I(ip);
+	unsigned int 	flags = inode->i_flags;
+	unsigned int 	vflags = inode->i_vflags;
+
+	if (flags & S_IMMUTABLE)
+		ip->i_d.di_flags |= XFS_DIFLAG_IMMUTABLE;
+	else
+		ip->i_d.di_flags &= ~XFS_DIFLAG_IMMUTABLE;
+	if (flags & S_IXUNLINK)
+		ip->i_d.di_flags |= XFS_DIFLAG_IXUNLINK;
+	else
+		ip->i_d.di_flags &= ~XFS_DIFLAG_IXUNLINK;
+
+	if (vflags & V_BARRIER)
+		ip->i_d.di_vflags |= XFS_DIVFLAG_BARRIER;
+	else
+		ip->i_d.di_vflags &= ~XFS_DIVFLAG_BARRIER;
+	if (vflags & V_COW)
+		ip->i_d.di_vflags |= XFS_DIVFLAG_COW;
+	else
+		ip->i_d.di_vflags &= ~XFS_DIVFLAG_COW;
+}
+
+int
+xfs_sync_flags(
+	struct inode		*inode,
+	int			flags,
+	int			vflags)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans        *tp;
+	unsigned int		lock_flags = 0;
+	int			code;
+
+	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+	if (code)
+		goto error_out;
+
+	lock_flags = XFS_ILOCK_EXCL;
+	xfs_ilock(ip, lock_flags);
+
+	xfs_trans_ijoin(tp, ip, lock_flags);
+	xfs_trans_ihold(tp, ip);
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	xfs_get_inode_flags(ip);
+
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+
+	XFS_STATS_INC(xs_ig_attrchg);
+
+	if (mp->m_flags & XFS_MOUNT_WSYNC)
+		xfs_trans_set_sync(tp);
+	code = xfs_trans_commit(tp, 0);
+	xfs_iunlock(ip, lock_flags);
+	return code;
+
+error_out:
+	xfs_trans_cancel(tp, 0);
+	if (lock_flags)
+		xfs_iunlock(ip, lock_flags);
+	return code;
+}
+
+
 int
 xfs_setattr(
 	struct xfs_inode	*ip,
@@ -69,6 +143,7 @@ xfs_setattr(
 	uint			commit_flags=0;
 	uid_t			uid=0, iuid=0;
 	gid_t			gid=0, igid=0;
+	tag_t			tag=0, itag=0;
 	struct xfs_dquot	*udqp, *gdqp, *olddquot1, *olddquot2;
 	int			need_iolock = 1;
 
@@ -161,7 +236,7 @@ xfs_setattr(
 	/*
 	 * Change file ownership.  Must be the owner or privileged.
 	 */
-	if (mask & (ATTR_UID|ATTR_GID)) {
+	if (mask & (ATTR_UID|ATTR_GID|ATTR_TAG)) {
 		/*
 		 * These IDs could have changed since we last looked at them.
 		 * But, we're assured that if the ownership did change
@@ -170,8 +245,10 @@ xfs_setattr(
 		 */
 		iuid = ip->i_d.di_uid;
 		igid = ip->i_d.di_gid;
+		itag = ip->i_d.di_tag;
 		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
 		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
+		tag = (mask & ATTR_TAG) ? iattr->ia_tag : itag;
 
 		/*
 		 * Do a quota reservation only if uid/gid is actually
@@ -179,7 +256,8 @@ xfs_setattr(
 		 */
 		if (XFS_IS_QUOTA_RUNNING(mp) &&
 		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
-		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
+		     (XFS_IS_GQUOTA_ON(mp) && igid != gid) ||
+		     (XFS_IS_GQUOTA_ON(mp) && itag != tag))) {
 			ASSERT(tp);
 			code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
 						capable(CAP_FOWNER) ?
@@ -340,7 +418,7 @@ xfs_setattr(
 	/*
 	 * Change file ownership.  Must be the owner or privileged.
 	 */
-	if (mask & (ATTR_UID|ATTR_GID)) {
+	if (mask & (ATTR_UID|ATTR_GID|ATTR_TAG)) {
 		/*
 		 * CAP_FSETID overrides the following restrictions:
 		 *
@@ -356,6 +434,10 @@ xfs_setattr(
 		 * Change the ownerships and register quota modifications
 		 * in the transaction.
 		 */
+		if (itag != tag) {
+			ip->i_d.di_tag = tag;
+			inode->i_tag = tag;
+		}
 		if (iuid != uid) {
 			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
 				ASSERT(mask & ATTR_UID);
diff -NurpP --minimal linux-2.6.32.17/fs/xfs/xfs_vnodeops.h linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_vnodeops.h
--- linux-2.6.32.17/fs/xfs/xfs_vnodeops.h	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/fs/xfs/xfs_vnodeops.h	2010-04-28 10:19:38.000000000 +0200
@@ -14,6 +14,7 @@ struct xfs_inode;
 struct xfs_iomap;
 
 
+int xfs_sync_flags(struct inode *, int, int);
 int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
 #define	XFS_ATTR_DMI		0x01	/* invocation from a DMI function */
 #define	XFS_ATTR_NONBLOCK	0x02	/* return EAGAIN if operation would block */
diff -NurpP --minimal linux-2.6.32.17/include/asm-generic/tlb.h linux-2.6.32.17-vs2.3.0.36.29.4/include/asm-generic/tlb.h
--- linux-2.6.32.17/include/asm-generic/tlb.h	2009-09-10 15:26:24.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/asm-generic/tlb.h	2009-12-03 20:04:56.000000000 +0100
@@ -14,6 +14,7 @@
 #define _ASM_GENERIC__TLB_H
 
 #include <linux/swap.h>
+#include <linux/vs_memory.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
diff -NurpP --minimal linux-2.6.32.17/include/linux/capability.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/capability.h
--- linux-2.6.32.17/include/linux/capability.h	2009-12-03 20:02:54.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/capability.h	2009-12-03 20:04:56.000000000 +0100
@@ -285,6 +285,7 @@ struct cpu_vfs_cap_data {
    arbitrary SCSI commands */
 /* Allow setting encryption key on loopback filesystem */
 /* Allow setting zone reclaim policy */
+/* Allow the selection of a security context */
 
 #define CAP_SYS_ADMIN        21
 
@@ -357,7 +358,13 @@ struct cpu_vfs_cap_data {
 
 #define CAP_MAC_ADMIN        33
 
-#define CAP_LAST_CAP         CAP_MAC_ADMIN
+/* Allow context manipulations */
+/* Allow changing context info on files */
+
+#define CAP_CONTEXT	     34
+
+
+#define CAP_LAST_CAP         CAP_CONTEXT
 
 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
 
diff -NurpP --minimal linux-2.6.32.17/include/linux/devpts_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/devpts_fs.h
--- linux-2.6.32.17/include/linux/devpts_fs.h	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/devpts_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -45,5 +45,4 @@ static inline void devpts_pty_kill(struc
 
 #endif
 
-
 #endif /* _LINUX_DEVPTS_FS_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/ext2_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/ext2_fs.h
--- linux-2.6.32.17/include/linux/ext2_fs.h	2009-03-24 14:22:41.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/ext2_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -189,8 +189,12 @@ struct ext2_group_desc
 #define EXT2_NOTAIL_FL			FS_NOTAIL_FL	/* file tail should not be merged */
 #define EXT2_DIRSYNC_FL			FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
 #define EXT2_TOPDIR_FL			FS_TOPDIR_FL	/* Top of directory hierarchies*/
+#define EXT2_IXUNLINK_FL		FS_IXUNLINK_FL	/* Immutable invert on unlink */
 #define EXT2_RESERVED_FL		FS_RESERVED_FL	/* reserved for ext2 lib */
 
+#define EXT2_BARRIER_FL			FS_BARRIER_FL	/* Barrier for chroot() */
+#define EXT2_COW_FL			FS_COW_FL	/* Copy on Write marker */
+
 #define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
 #define EXT2_FL_USER_MODIFIABLE		FS_FL_USER_MODIFIABLE	/* User modifiable flags */
 
@@ -274,7 +278,8 @@ struct ext2_inode {
 			__u16	i_pad1;
 			__le16	l_i_uid_high;	/* these 2 fields    */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
+			__le16	l_i_tag;	/* Context Tag */
+			__u16	l_i_reserved2;
 		} linux2;
 		struct {
 			__u8	h_i_frag;	/* Fragment number */
@@ -303,6 +308,7 @@ struct ext2_inode {
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_reserved2	osd2.linux2.l_i_reserved2
 #endif
 
@@ -347,6 +353,7 @@ struct ext2_inode {
 #define EXT2_MOUNT_USRQUOTA		0x020000  /* user quota */
 #define EXT2_MOUNT_GRPQUOTA		0x040000  /* group quota */
 #define EXT2_MOUNT_RESERVATION		0x080000  /* Preallocation */
+#define EXT2_MOUNT_TAGGED		(1<<24)	  /* Enable Context Tags */
 
 
 #define clear_opt(o, opt)		o &= ~EXT2_MOUNT_##opt
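
The former reserved word of the ext2 on-disk inode now carries the context tag, reachable through the i_raw_tag accessor. A sketch of the endianness handling this implies when reading the tag off disk; le16_to_cpu() is the standard kernel helper, and the function itself is illustrative.

/* Sketch: read the on-disk context tag of a raw (little-endian)
 * struct ext2_inode.  i_raw_tag expands to osd2.linux2.l_i_tag. */
static inline tag_t ext2_read_raw_tag(struct ext2_inode *raw)
{
	return (tag_t)le16_to_cpu(raw->i_raw_tag);
}
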
diff -NurpP --minimal linux-2.6.32.17/include/linux/ext3_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/ext3_fs.h
--- linux-2.6.32.17/include/linux/ext3_fs.h	2009-09-10 15:26:25.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/ext3_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -173,10 +173,14 @@ struct ext3_group_desc
 #define EXT3_NOTAIL_FL			0x00008000 /* file tail should not be merged */
 #define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
 #define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
+#define EXT3_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
 #define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */
 
-#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
+#define EXT3_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define EXT3_COW_FL			0x20000000 /* Copy on Write marker */
+
+#define EXT3_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
+#define EXT3_FL_USER_MODIFIABLE		0x010380FF /* User modifiable flags */
 
 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
@@ -320,7 +324,8 @@ struct ext3_inode {
 			__u16	i_pad1;
 			__le16	l_i_uid_high;	/* these 2 fields    */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
+			__le16	l_i_tag;	/* Context Tag */
+			__u16	l_i_reserved2;
 		} linux2;
 		struct {
 			__u8	h_i_frag;	/* Fragment number */
@@ -351,6 +356,7 @@ struct ext3_inode {
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_reserved2	osd2.linux2.l_i_reserved2
 
 #elif defined(__GNU__)
@@ -414,6 +420,7 @@ struct ext3_inode {
 #define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
 #define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
 						  * error in ordered mode */
+#define EXT3_MOUNT_TAGGED		(1<<24) /* Enable Context Tags */
 
 /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
 #ifndef _LINUX_EXT2_FS_H
@@ -892,6 +899,7 @@ extern void ext3_get_inode_flags(struct 
 extern void ext3_set_aops(struct inode *inode);
 extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len);
+extern int ext3_sync_flags(struct inode *, int, int);
 
 /* ioctl.c */
 extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
diff -NurpP --minimal linux-2.6.32.17/include/linux/fs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/fs.h
--- linux-2.6.32.17/include/linux/fs.h	2010-08-09 18:04:14.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/fs.h	2010-07-08 02:22:31.000000000 +0200
@@ -208,6 +208,9 @@ struct inodes_stat_t {
 #define MS_KERNMOUNT	(1<<22) /* this is a kern_mount call */
 #define MS_I_VERSION	(1<<23) /* Update inode I_version field */
 #define MS_STRICTATIME	(1<<24) /* Always perform atime updates */
+#define MS_TAGGED	(1<<25) /* use generic inode tagging */
+#define MS_TAGID	(1<<26) /* use specific tag for this mount */
+#define MS_NOTAGCHECK	(1<<27) /* don't check tags */
 #define MS_ACTIVE	(1<<30)
 #define MS_NOUSER	(1<<31)
 
@@ -234,6 +237,14 @@ struct inodes_stat_t {
 #define S_NOCMTIME	128	/* Do not update file c/mtime */
 #define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
 #define S_PRIVATE	512	/* Inode is fs-internal */
+#define S_IXUNLINK	1024	/* Immutable Invert on unlink */
+
+/* Linux-VServer related Inode flags */
+
+#define V_VALID		1
+#define V_XATTR		2
+#define V_BARRIER	4	/* Barrier for chroot() */
+#define V_COW		8	/* Copy on Write */
 
 /*
  * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -256,12 +267,15 @@ struct inodes_stat_t {
 #define IS_DIRSYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
 					((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
 #define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
-#define IS_NOATIME(inode)   __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
-#define IS_I_VERSION(inode)   __IS_FLG(inode, MS_I_VERSION)
+#define IS_NOATIME(inode)	__IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+#define IS_I_VERSION(inode)	__IS_FLG(inode, MS_I_VERSION)
+#define IS_TAGGED(inode)	__IS_FLG(inode, MS_TAGGED)
 
 #define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
 #define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
 #define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
+#define IS_IXUNLINK(inode)	((inode)->i_flags & S_IXUNLINK)
+#define IS_IXORUNLINK(inode)	((IS_IXUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode))
 #define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)
 
 #define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
@@ -269,6 +283,16 @@ struct inodes_stat_t {
 #define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
 #define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
 
+#define IS_BARRIER(inode)	(S_ISDIR((inode)->i_mode) && ((inode)->i_vflags & V_BARRIER))
+
+#ifdef CONFIG_VSERVER_COWBL
+#  define IS_COW(inode)		(IS_IXUNLINK(inode) && IS_IMMUTABLE(inode))
+#  define IS_COW_LINK(inode)	(S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1))
+#else
+#  define IS_COW(inode)		(0)
+#  define IS_COW_LINK(inode)	(0)
+#endif
+
 /* the read-only stuff doesn't really belong here, but any other place is
    probably as bad and I don't want to create yet another include file. */
 
@@ -350,11 +374,14 @@ struct inodes_stat_t {
 #define FS_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
 #define FS_EXTENT_FL			0x00080000 /* Extents */
 #define FS_DIRECTIO_FL			0x00100000 /* Use direct i/o */
+#define FS_IXUNLINK_FL			0x08000000 /* Immutable invert on unlink */
 #define FS_RESERVED_FL			0x80000000 /* reserved for ext2 lib */
 
-#define FS_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define FS_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
+#define FS_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define FS_COW_FL			0x20000000 /* Copy on Write marker */
 
+#define FS_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
+#define FS_FL_USER_MODIFIABLE		0x010380FF /* User modifiable flags */
 
 #define SYNC_FILE_RANGE_WAIT_BEFORE	1
 #define SYNC_FILE_RANGE_WRITE		2
@@ -436,6 +463,7 @@ typedef void (dio_iodone_t)(struct kiocb
 #define ATTR_KILL_PRIV	(1 << 14)
 #define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
 #define ATTR_TIMES_SET	(1 << 16)
+#define ATTR_TAG	(1 << 17)
 
 /*
  * This is the Inode Attributes structure, used for notify_change().  It
@@ -451,6 +479,7 @@ struct iattr {
 	umode_t		ia_mode;
 	uid_t		ia_uid;
 	gid_t		ia_gid;
+	tag_t		ia_tag;
 	loff_t		ia_size;
 	struct timespec	ia_atime;
 	struct timespec	ia_mtime;
@@ -464,6 +493,9 @@ struct iattr {
 	struct file	*ia_file;
 };
 
+#define ATTR_FLAG_BARRIER	512	/* Barrier for chroot() */
+#define ATTR_FLAG_IXUNLINK	1024	/* Immutable invert on unlink */
+
 /*
  * Includes for diskquotas.
  */
@@ -729,7 +761,9 @@ struct inode {
 	unsigned int		i_nlink;
 	uid_t			i_uid;
 	gid_t			i_gid;
+	tag_t			i_tag;
 	dev_t			i_rdev;
+	dev_t			i_mdev;
 	u64			i_version;
 	loff_t			i_size;
 #ifdef __NEED_I_SIZE_ORDERED
@@ -776,7 +810,8 @@ struct inode {
 	unsigned long		i_state;
 	unsigned long		dirtied_when;	/* jiffies of first dirtying */
 
-	unsigned int		i_flags;
+	unsigned short		i_flags;
+	unsigned short		i_vflags;
 
 	atomic_t		i_writecount;
 #ifdef CONFIG_SECURITY
@@ -864,12 +899,12 @@ static inline void i_size_write(struct i
 
 static inline unsigned iminor(const struct inode *inode)
 {
-	return MINOR(inode->i_rdev);
+	return MINOR(inode->i_mdev);
 }
 
 static inline unsigned imajor(const struct inode *inode)
 {
-	return MAJOR(inode->i_rdev);
+	return MAJOR(inode->i_mdev);
 }
 
 extern struct block_device *I_BDEV(struct inode *inode);
@@ -928,6 +963,7 @@ struct file {
 	loff_t			f_pos;
 	struct fown_struct	f_owner;
 	const struct cred	*f_cred;
+	xid_t			f_xid;
 	struct file_ra_state	f_ra;
 
 	u64			f_version;
@@ -1069,6 +1105,7 @@ struct file_lock {
 	struct file *fl_file;
 	loff_t fl_start;
 	loff_t fl_end;
+	xid_t fl_xid;
 
 	struct fasync_struct *	fl_fasync; /* for lease break notifications */
 	unsigned long fl_break_time;	/* for nonblocking lease breaks */
@@ -1536,6 +1573,7 @@ struct inode_operations {
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
+	int (*sync_flags) (struct inode *, int, int);
 	void (*truncate_range)(struct inode *, loff_t, loff_t);
 	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
 			  loff_t len);
@@ -1556,6 +1594,7 @@ extern ssize_t vfs_readv(struct file *, 
 		unsigned long, loff_t *);
 extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
 		unsigned long, loff_t *);
+ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t);
 
 struct super_operations {
    	struct inode *(*alloc_inode)(struct super_block *sb);
@@ -2354,6 +2393,7 @@ extern int dcache_dir_open(struct inode 
 extern int dcache_dir_close(struct inode *, struct file *);
 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
 extern int dcache_readdir(struct file *, void *, filldir_t);
+extern int dcache_readdir_filter(struct file *, void *, filldir_t, int (*)(struct dentry *));
 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int simple_statfs(struct dentry *, struct kstatfs *);
 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
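
The fs.h changes introduce the per-inode tag, the split i_flags/i_vflags pair, and the IS_TAGGED()/IS_BARRIER()/IS_COW() predicates. A small sketch of how the tag can be consulted, using only the fields and macros added above; the policy shown is illustrative, not the patch's actual access-check code.

/* Sketch: decide whether an inode is visible to a context with tag 'tag'.
 * Untagged mounts are never filtered; tag 0 is treated as host-owned. */
static int tag_may_access(struct inode *inode, tag_t tag)
{
	if (!IS_TAGGED(inode))
		return 1;
	return inode->i_tag == 0 || inode->i_tag == tag;
}
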
diff -NurpP --minimal linux-2.6.32.17/include/linux/gfs2_ondisk.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/gfs2_ondisk.h
--- linux-2.6.32.17/include/linux/gfs2_ondisk.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/gfs2_ondisk.h	2009-12-03 20:04:56.000000000 +0100
@@ -235,6 +235,9 @@ enum {
 	gfs2fl_NoAtime		= 7,
 	gfs2fl_Sync		= 8,
 	gfs2fl_System		= 9,
+	gfs2fl_IXUnlink		= 16,
+	gfs2fl_Barrier		= 17,
+	gfs2fl_Cow		= 18,
 	gfs2fl_TruncInProg	= 29,
 	gfs2fl_InheritDirectio	= 30,
 	gfs2fl_InheritJdata	= 31,
@@ -251,6 +254,9 @@ enum {
 #define GFS2_DIF_NOATIME		0x00000080
 #define GFS2_DIF_SYNC			0x00000100
 #define GFS2_DIF_SYSTEM			0x00000200 /* New in gfs2 */
+#define GFS2_DIF_IXUNLINK		0x00010000
+#define GFS2_DIF_BARRIER		0x00020000
+#define GFS2_DIF_COW			0x00040000
 #define GFS2_DIF_TRUNC_IN_PROG		0x20000000 /* New in gfs2 */
 #define GFS2_DIF_INHERIT_DIRECTIO	0x40000000
 #define GFS2_DIF_INHERIT_JDATA		0x80000000
diff -NurpP --minimal linux-2.6.32.17/include/linux/if_tun.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/if_tun.h
--- linux-2.6.32.17/include/linux/if_tun.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/if_tun.h	2009-12-03 20:04:56.000000000 +0100
@@ -48,6 +48,7 @@
 #define TUNGETIFF      _IOR('T', 210, unsigned int)
 #define TUNGETSNDBUF   _IOR('T', 211, int)
 #define TUNSETSNDBUF   _IOW('T', 212, int)
+#define TUNSETNID     _IOW('T', 215, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
diff -NurpP --minimal linux-2.6.32.17/include/linux/init_task.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/init_task.h
--- linux-2.6.32.17/include/linux/init_task.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/init_task.h	2009-12-03 20:04:56.000000000 +0100
@@ -184,6 +184,10 @@ extern struct cred init_cred;
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
 	INIT_TASK_RCU_PREEMPT(tsk)					\
+	.xid		= 0,						\
+	.vx_info	= NULL,						\
+	.nid		= 0,						\
+	.nx_info	= NULL,						\
 }
 
 
diff -NurpP --minimal linux-2.6.32.17/include/linux/ipc.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/ipc.h
--- linux-2.6.32.17/include/linux/ipc.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/ipc.h	2009-12-03 20:04:56.000000000 +0100
@@ -91,6 +91,7 @@ struct kern_ipc_perm
 	key_t		key;
 	uid_t		uid;
 	gid_t		gid;
+	xid_t		xid;
 	uid_t		cuid;
 	gid_t		cgid;
 	mode_t		mode; 
diff -NurpP --minimal linux-2.6.32.17/include/linux/Kbuild linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/Kbuild
--- linux-2.6.32.17/include/linux/Kbuild	2009-12-03 20:02:54.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/Kbuild	2009-12-03 20:04:56.000000000 +0100
@@ -382,5 +382,8 @@ unifdef-y += xattr.h
 unifdef-y += xfrm.h
 
 objhdr-y += version.h
+
+header-y += vserver/
 header-y += wimax.h
 header-y += wimax/
+
diff -NurpP --minimal linux-2.6.32.17/include/linux/loop.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/loop.h
--- linux-2.6.32.17/include/linux/loop.h	2009-09-10 15:26:25.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/loop.h	2009-12-03 20:04:56.000000000 +0100
@@ -45,6 +45,7 @@ struct loop_device {
 	struct loop_func_table *lo_encryption;
 	__u32           lo_init[2];
 	uid_t		lo_key_owner;	/* Who set the key */
+	xid_t		lo_xid;
 	int		(*ioctl)(struct loop_device *, int cmd, 
 				 unsigned long arg); 
 
diff -NurpP --minimal linux-2.6.32.17/include/linux/magic.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/magic.h
--- linux-2.6.32.17/include/linux/magic.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/magic.h	2009-12-03 20:04:56.000000000 +0100
@@ -3,7 +3,7 @@
 
 #define ADFS_SUPER_MAGIC	0xadf5
 #define AFFS_SUPER_MAGIC	0xadff
-#define AFS_SUPER_MAGIC                0x5346414F
+#define AFS_SUPER_MAGIC		0x5346414F
 #define AUTOFS_SUPER_MAGIC	0x0187
 #define CODA_SUPER_MAGIC	0x73757245
 #define CRAMFS_MAGIC		0x28cd3d45	/* some random number */
@@ -38,6 +38,7 @@
 #define NFS_SUPER_MAGIC		0x6969
 #define OPENPROM_SUPER_MAGIC	0x9fa1
 #define PROC_SUPER_MAGIC	0x9fa0
+#define DEVPTS_SUPER_MAGIC	0x1cd1
 #define QNX4_SUPER_MAGIC	0x002f		/* qnx4 fs detection */
 
 #define REISERFS_SUPER_MAGIC	0x52654973	/* used by gcc */
diff -NurpP --minimal linux-2.6.32.17/include/linux/major.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/major.h
--- linux-2.6.32.17/include/linux/major.h	2009-09-10 15:26:25.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/major.h	2009-12-03 20:04:56.000000000 +0100
@@ -15,6 +15,7 @@
 #define HD_MAJOR		IDE0_MAJOR
 #define PTY_SLAVE_MAJOR		3
 #define TTY_MAJOR		4
+#define VROOT_MAJOR		4
 #define TTYAUX_MAJOR		5
 #define LP_MAJOR		6
 #define VCS_MAJOR		7
diff -NurpP --minimal linux-2.6.32.17/include/linux/memcontrol.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/memcontrol.h
--- linux-2.6.32.17/include/linux/memcontrol.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/memcontrol.h	2010-02-05 00:15:49.000000000 +0100
@@ -70,6 +70,13 @@ int task_in_mem_cgroup(struct task_struc
 
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
+extern u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member);
+extern u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member);
+
+extern s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem);
+extern s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem);
+extern s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem);
+
 static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
diff -NurpP --minimal linux-2.6.32.17/include/linux/mm_types.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/mm_types.h
--- linux-2.6.32.17/include/linux/mm_types.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/mm_types.h	2009-12-03 20:04:56.000000000 +0100
@@ -246,6 +246,7 @@ struct mm_struct {
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
+	struct vx_info *mm_vx_info;
 
 	/* Swap token stuff */
 	/*
diff -NurpP --minimal linux-2.6.32.17/include/linux/mount.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/mount.h
--- linux-2.6.32.17/include/linux/mount.h	2009-09-10 15:26:25.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/mount.h	2009-12-03 20:04:56.000000000 +0100
@@ -36,6 +36,9 @@ struct mnt_namespace;
 #define MNT_UNBINDABLE	0x2000	/* if the vfsmount is a unbindable mount */
 #define MNT_PNODE_MASK	0x3000	/* propagation flag mask */
 
+#define MNT_TAGID	0x10000
+#define MNT_NOTAG	0x20000
+
 struct vfsmount {
 	struct list_head mnt_hash;
 	struct vfsmount *mnt_parent;	/* fs we are mounted on */
@@ -70,6 +73,7 @@ struct vfsmount {
 #else
 	int mnt_writers;
 #endif
+	tag_t mnt_tag;			/* tagging used for vfsmount */
 };
 
 static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
diff -NurpP --minimal linux-2.6.32.17/include/linux/net.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/net.h
--- linux-2.6.32.17/include/linux/net.h	2009-12-03 20:02:55.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/net.h	2009-12-03 20:04:56.000000000 +0100
@@ -69,6 +69,7 @@ struct net;
 #define SOCK_NOSPACE		2
 #define SOCK_PASSCRED		3
 #define SOCK_PASSSEC		4
+#define SOCK_USER_SOCKET	5
 
 #ifndef ARCH_HAS_SOCKET_TYPES
 /**
diff -NurpP --minimal linux-2.6.32.17/include/linux/nfs_mount.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/nfs_mount.h
--- linux-2.6.32.17/include/linux/nfs_mount.h	2009-03-24 14:22:43.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/nfs_mount.h	2009-12-03 20:04:56.000000000 +0100
@@ -63,7 +63,8 @@ struct nfs_mount_data {
 #define NFS_MOUNT_SECFLAVOUR	0x2000	/* 5 */
 #define NFS_MOUNT_NORDIRPLUS	0x4000	/* 5 */
 #define NFS_MOUNT_UNSHARED	0x8000	/* 5 */
-#define NFS_MOUNT_FLAGMASK	0xFFFF
+#define NFS_MOUNT_TAGGED	0x10000	/* context tagging */
+#define NFS_MOUNT_FLAGMASK	0x1FFFF
 
 /* The following are for internal use only */
 #define NFS_MOUNT_LOOKUP_CACHE_NONEG	0x10000
diff -NurpP --minimal linux-2.6.32.17/include/linux/nsproxy.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/nsproxy.h
--- linux-2.6.32.17/include/linux/nsproxy.h	2009-06-11 17:13:17.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/nsproxy.h	2009-12-03 20:04:56.000000000 +0100
@@ -3,6 +3,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/sched.h>
+#include <linux/vserver/debug.h>
 
 struct mnt_namespace;
 struct uts_namespace;
@@ -63,22 +64,33 @@ static inline struct nsproxy *task_nspro
 }
 
 int copy_namespaces(unsigned long flags, struct task_struct *tsk);
+struct nsproxy *copy_nsproxy(struct nsproxy *orig);
 void exit_task_namespaces(struct task_struct *tsk);
 void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
 void free_nsproxy(struct nsproxy *ns);
 int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
 	struct fs_struct *);
 
-static inline void put_nsproxy(struct nsproxy *ns)
+#define	get_nsproxy(n)	__get_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __get_nsproxy(struct nsproxy *ns,
+	const char *_file, int _line)
 {
-	if (atomic_dec_and_test(&ns->count)) {
-		free_nsproxy(ns);
-	}
+	vxlprintk(VXD_CBIT(space, 0), "get_nsproxy(%p[%u])",
+		ns, atomic_read(&ns->count), _file, _line);
+	atomic_inc(&ns->count);
 }
 
-static inline void get_nsproxy(struct nsproxy *ns)
+#define	put_nsproxy(n)	__put_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __put_nsproxy(struct nsproxy *ns,
+	const char *_file, int _line)
 {
-	atomic_inc(&ns->count);
+	vxlprintk(VXD_CBIT(space, 0), "put_nsproxy(%p[%u])",
+		ns, atomic_read(&ns->count), _file, _line);
+	if (atomic_dec_and_test(&ns->count)) {
+		free_nsproxy(ns);
+	}
 }
 
 #ifdef CONFIG_CGROUP_NS
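
get_nsproxy()/put_nsproxy() become macros so the underlying helpers can record the caller's file and line for the VServer debug log; call sites are unchanged. A sketch of a caller after this hunk.

/* Caller's view is the same as before: the macros pass __FILE__/__LINE__
 * into the wrappers, which log via vxlprintk() and then adjust count. */
static void hold_and_release(struct nsproxy *ns)
{
	get_nsproxy(ns);	/* expands to __get_nsproxy(ns, __FILE__, __LINE__) */
	/* ... use ns ... */
	put_nsproxy(ns);	/* frees via free_nsproxy() on the last reference */
}
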
diff -NurpP --minimal linux-2.6.32.17/include/linux/pid.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/pid.h
--- linux-2.6.32.17/include/linux/pid.h	2009-03-24 14:22:43.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/pid.h	2009-12-03 20:04:56.000000000 +0100
@@ -8,7 +8,8 @@ enum pid_type
 	PIDTYPE_PID,
 	PIDTYPE_PGID,
 	PIDTYPE_SID,
-	PIDTYPE_MAX
+	PIDTYPE_MAX,
+	PIDTYPE_REALPID
 };
 
 /*
@@ -160,6 +161,7 @@ static inline pid_t pid_nr(struct pid *p
 }
 
 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
+pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns);
 pid_t pid_vnr(struct pid *pid);
 
 #define do_each_pid_task(pid, type, task)				\
diff -NurpP --minimal linux-2.6.32.17/include/linux/proc_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/proc_fs.h
--- linux-2.6.32.17/include/linux/proc_fs.h	2009-12-03 20:02:56.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/proc_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -56,6 +56,7 @@ struct proc_dir_entry {
 	nlink_t nlink;
 	uid_t uid;
 	gid_t gid;
+	int vx_flags;
 	loff_t size;
 	const struct inode_operations *proc_iops;
 	/*
@@ -250,12 +251,18 @@ kclist_add(struct kcore_list *new, void 
 extern void kclist_add(struct kcore_list *, void *, size_t, int type);
 #endif
 
+struct vx_info;
+struct nx_info;
+
 union proc_op {
 	int (*proc_get_link)(struct inode *, struct path *);
 	int (*proc_read)(struct task_struct *task, char *page);
 	int (*proc_show)(struct seq_file *m,
 		struct pid_namespace *ns, struct pid *pid,
 		struct task_struct *task);
+	int (*proc_vs_read)(char *page);
+	int (*proc_vxi_read)(struct vx_info *vxi, char *page);
+	int (*proc_nxi_read)(struct nx_info *nxi, char *page);
 };
 
 struct ctl_table_header;
@@ -263,6 +270,7 @@ struct ctl_table;
 
 struct proc_inode {
 	struct pid *pid;
+	int vx_flags;
 	int fd;
 	union proc_op op;
 	struct proc_dir_entry *pde;
diff -NurpP --minimal linux-2.6.32.17/include/linux/quotaops.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/quotaops.h
--- linux-2.6.32.17/include/linux/quotaops.h	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/quotaops.h	2010-04-05 20:57:19.000000000 +0200
@@ -8,6 +8,7 @@
 #define _LINUX_QUOTAOPS_
 
 #include <linux/fs.h>
+#include <linux/vs_dlimit.h>
 
 static inline struct quota_info *sb_dqopt(struct super_block *sb)
 {
@@ -157,10 +158,14 @@ static inline void vfs_dq_init(struct in
  * a transaction (deadlocks possible otherwise) */
 static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
 	if (sb_any_quota_active(inode->i_sb)) {
 		/* Used space is updated in alloc_space() */
-		if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA)
+		if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA) {
+			dl_free_space(inode, nr);
 			return 1;
+		}
 	}
 	else
 		inode_add_bytes(inode, nr);
@@ -177,10 +182,14 @@ static inline int vfs_dq_prealloc_space(
 
 static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
 	if (sb_any_quota_active(inode->i_sb)) {
 		/* Used space is updated in alloc_space() */
-		if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA)
+		if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) {
+			dl_free_space(inode, nr);
 			return 1;
+		}
 	}
 	else
 		inode_add_bytes(inode, nr);
@@ -197,10 +206,14 @@ static inline int vfs_dq_alloc_space(str
 
 static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_reserve_space(inode, nr))
+		return 1;
 	if (sb_any_quota_active(inode->i_sb)) {
 		/* Used space is updated in alloc_space() */
-		if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
+		if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA) {
+			dl_release_space(inode, nr);
 			return 1;
+		}
 	}
 	else
 		inode_add_rsv_space(inode, nr);
@@ -209,10 +222,14 @@ static inline int vfs_dq_reserve_space(s
 
 static inline int vfs_dq_alloc_inode(struct inode *inode)
 {
+	if (dl_alloc_inode(inode))
+		return 1;
 	if (sb_any_quota_active(inode->i_sb)) {
 		vfs_dq_init(inode);
-		if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA)
+		if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) {
+			dl_free_inode(inode);
 			return 1;
+		}
 	}
 	return 0;
 }
@@ -222,9 +239,13 @@ static inline int vfs_dq_alloc_inode(str
  */
 static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_claim_space(inode, nr))
+		return 1;
 	if (sb_any_quota_active(inode->i_sb)) {
-		if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
+		if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA) {
+			dl_release_space(inode, nr);
 			return 1;
+		}
 	} else
 		inode_claim_rsv_space(inode, nr);
 
@@ -242,6 +263,7 @@ void vfs_dq_release_reservation_space(st
 		inode->i_sb->dq_op->release_rsv(inode, nr);
 	else
 		inode_sub_rsv_space(inode, nr);
+	dl_release_space(inode, nr);
 }
 
 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
@@ -250,6 +272,7 @@ static inline void vfs_dq_free_space_nod
 		inode->i_sb->dq_op->free_space(inode, nr);
 	else
 		inode_sub_bytes(inode, nr);
+	dl_free_space(inode, nr);
 }
 
 static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
@@ -262,6 +285,7 @@ static inline void vfs_dq_free_inode(str
 {
 	if (sb_any_quota_active(inode->i_sb))
 		inode->i_sb->dq_op->free_inode(inode, 1);
+	dl_free_inode(inode);
 }
 
 /* Cannot be called inside a transaction */
@@ -365,6 +389,8 @@ static inline int vfs_dq_transfer(struct
 
 static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
 	inode_add_bytes(inode, nr);
 	return 0;
 }
@@ -378,6 +404,8 @@ static inline int vfs_dq_prealloc_space(
 
 static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	if (dl_alloc_space(inode, nr))
+		return 1;
 	inode_add_bytes(inode, nr);
 	return 0;
 }
@@ -391,22 +419,28 @@ static inline int vfs_dq_alloc_space(str
 
 static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_reserve_space(inode, nr))
+		return 1;
 	return 0;
 }
 
 static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
 {
+	if (dl_claim_space(inode, nr))
+		return 1;
 	return vfs_dq_alloc_space(inode, nr);
 }
 
 static inline
 int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
 {
+	dl_release_space(inode, nr);
 	return 0;
 }
 
 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
+	dl_free_space(inode, nr);
 	inode_sub_bytes(inode, nr);
 }
 
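
Each quota hook above now brackets the VServer disk-limit (dlimit) accounting around the original quota operation: charge the context first, and roll the charge back with the matching dl_free_*/dl_release_* helper if the quota layer refuses. A condensed sketch of that recurring pattern, written out once; the sb_any_quota_active() guard present in the real hooks is omitted for brevity.

/* The pattern repeated in the hunks above: charge the context disk
 * limit, then the quota, and undo the dlimit charge on NO_QUOTA. */
static inline int charge_space(struct inode *inode, qsize_t nr)
{
	if (dl_alloc_space(inode, nr))		/* context limit exceeded */
		return 1;
	if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) {
		dl_free_space(inode, nr);	/* roll back the dlimit charge */
		return 1;
	}
	return 0;
}
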
diff -NurpP --minimal linux-2.6.32.17/include/linux/reboot.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/reboot.h
--- linux-2.6.32.17/include/linux/reboot.h	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/reboot.h	2009-12-03 22:06:59.000000000 +0100
@@ -33,6 +33,7 @@
 #define	LINUX_REBOOT_CMD_RESTART2	0xA1B2C3D4
 #define	LINUX_REBOOT_CMD_SW_SUSPEND	0xD000FCE2
 #define	LINUX_REBOOT_CMD_KEXEC		0x45584543
+#define	LINUX_REBOOT_CMD_OOM		0xDEADBEEF
 
 
 #ifdef __KERNEL__
diff -NurpP --minimal linux-2.6.32.17/include/linux/reiserfs_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/reiserfs_fs.h
--- linux-2.6.32.17/include/linux/reiserfs_fs.h	2009-09-10 15:26:26.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/reiserfs_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -899,6 +899,11 @@ struct stat_data_v1 {
 #define REISERFS_COMPR_FL     FS_COMPR_FL
 #define REISERFS_NOTAIL_FL    FS_NOTAIL_FL
 
+/* unfortunately reiserfs sdattr is only 16 bit */
+#define REISERFS_IXUNLINK_FL  (FS_IXUNLINK_FL >> 16)
+#define REISERFS_BARRIER_FL   (FS_BARRIER_FL >> 16)
+#define REISERFS_COW_FL       (FS_COW_FL >> 16)
+
 /* persistent flags that file inherits from the parent directory */
 #define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL |	\
 				REISERFS_SYNC_FL |	\
@@ -908,6 +913,9 @@ struct stat_data_v1 {
 				REISERFS_COMPR_FL |	\
 				REISERFS_NOTAIL_FL )
 
+#define REISERFS_FL_USER_VISIBLE	0x80FF
+#define REISERFS_FL_USER_MODIFIABLE	0x80FF
+
 /* Stat Data on disk (reiserfs version of UFS disk inode minus the
    address blocks) */
 struct stat_data {
@@ -1989,6 +1997,7 @@ static inline void reiserfs_update_sd(st
 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
 void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
+int reiserfs_sync_flags(struct inode *inode, int, int);
 
 /* namei.c */
 void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
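
Because the reiserfs stat-data attribute word is only 16 bits wide, the new flags are stored shifted right by 16 relative to their generic FS_*_FL values. A sketch of the mapping this implies; the helper is illustrative, and the conversion helpers declared above (sd_attrs_to_i_attrs()/i_attrs_to_sd_attrs()) are the natural place for it in the real code.

/* Illustrative mapping: FS_IXUNLINK_FL (0x08000000) becomes 0x0800 in
 * sd_attrs, and likewise for the barrier and CoW markers. */
static inline __u16 fs_to_reiserfs_vflags(unsigned int fs_flags)
{
	__u16 sd = 0;

	if (fs_flags & FS_IXUNLINK_FL)
		sd |= REISERFS_IXUNLINK_FL;
	if (fs_flags & FS_BARRIER_FL)
		sd |= REISERFS_BARRIER_FL;
	if (fs_flags & FS_COW_FL)
		sd |= REISERFS_COW_FL;
	return sd;
}
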
diff -NurpP --minimal linux-2.6.32.17/include/linux/reiserfs_fs_sb.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/reiserfs_fs_sb.h
--- linux-2.6.32.17/include/linux/reiserfs_fs_sb.h	2009-09-10 15:26:26.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/reiserfs_fs_sb.h	2009-12-03 20:04:56.000000000 +0100
@@ -456,6 +456,7 @@ enum reiserfs_mount_options {
 	REISERFS_EXPOSE_PRIVROOT,
 	REISERFS_BARRIER_NONE,
 	REISERFS_BARRIER_FLUSH,
+	REISERFS_TAGGED,
 
 	/* Actions on error */
 	REISERFS_ERROR_PANIC,
diff -NurpP --minimal linux-2.6.32.17/include/linux/sched.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sched.h
--- linux-2.6.32.17/include/linux/sched.h	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sched.h	2010-05-27 19:21:36.000000000 +0200
@@ -390,25 +390,28 @@ extern void arch_unmap_area_topdown(stru
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
  */
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+#define __set_mm_counter(mm, member, value) \
+	atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) \
+	((unsigned long)atomic_long_read(&(mm)->_##member))
 
 #else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
  */
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
+#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
 #define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--
 
 #endif /* !USE_SPLIT_PTLOCKS */
 
+#define set_mm_counter(mm, member, value) \
+	vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
+#define add_mm_counter(mm, member, value) \
+	vx_ ## member ## pages_add((mm), (value))
+#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
+#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
+
 #define get_mm_rss(mm)					\
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
 #define update_hiwater_rss(mm)	do {			\
@@ -1186,6 +1189,12 @@ struct sched_entity {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+#ifdef CONFIG_CFS_HARD_LIMITS
+	u64			throttle_start;
+	u64			throttle_max;
+	u64			throttle_count;
+	u64			throttle_sum;
+#endif
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1396,6 +1405,14 @@ struct task_struct {
 #endif
 	seccomp_t seccomp;
 
+/* vserver context data */
+	struct vx_info *vx_info;
+	struct nx_info *nx_info;
+
+	xid_t xid;
+	nid_t nid;
+	tag_t tag;
+
 /* Thread group tracking */
    	u32 parent_exec_id;
    	u32 self_exec_id;
@@ -1620,6 +1637,11 @@ struct pid_namespace;
 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
 			struct pid_namespace *ns);
 
+#include <linux/vserver/base.h>
+#include <linux/vserver/context.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/pid.h>
+
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
 	return tsk->pid;
@@ -1633,7 +1655,8 @@ static inline pid_t task_pid_nr_ns(struc
 
 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+	// return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+	return vx_map_pid(__task_pid_nr_ns(tsk, PIDTYPE_PID, NULL));
 }
 
 
@@ -1646,7 +1669,7 @@ pid_t task_tgid_nr_ns(struct task_struct
 
 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_tgid(tsk));
+	return vx_map_tgid(pid_vnr(task_tgid(tsk)));
 }
 
 
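
The mm counter macros are rerouted through VServer accounting wrappers by pasting the counter name into a vx_*pages_* helper; those helpers come from vs_memory.h, which is not part of this excerpt and is expected to combine the original counter update with per-context accounting via mm->mm_vx_info. A sketch of what one call expands to after this hunk.

/* Example expansion with member = anon_rss:
 *
 *	inc_mm_counter(mm, anon_rss);
 *
 * now expands (mechanically, by token pasting) to
 *
 *	vx_anon_rsspages_inc((mm));
 */
static void account_anon_page(struct mm_struct *mm)
{
	inc_mm_counter(mm, anon_rss);
}
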
diff -NurpP --minimal linux-2.6.32.17/include/linux/shmem_fs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/shmem_fs.h
--- linux-2.6.32.17/include/linux/shmem_fs.h	2009-12-03 20:02:56.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/shmem_fs.h	2009-12-03 20:04:56.000000000 +0100
@@ -8,6 +8,9 @@
 
 #define SHMEM_NR_DIRECT 16
 
+#define TMPFS_SUPER_MAGIC	0x01021994
+
+
 struct shmem_inode_info {
 	spinlock_t		lock;
 	unsigned long		flags;
diff -NurpP --minimal linux-2.6.32.17/include/linux/stat.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/stat.h
--- linux-2.6.32.17/include/linux/stat.h	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/stat.h	2009-12-03 20:04:56.000000000 +0100
@@ -66,6 +66,7 @@ struct kstat {
 	unsigned int	nlink;
 	uid_t		uid;
 	gid_t		gid;
+	tag_t		tag;
 	dev_t		rdev;
 	loff_t		size;
 	struct timespec  atime;
diff -NurpP --minimal linux-2.6.32.17/include/linux/sunrpc/auth.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sunrpc/auth.h
--- linux-2.6.32.17/include/linux/sunrpc/auth.h	2009-12-03 20:02:56.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sunrpc/auth.h	2009-12-03 20:04:56.000000000 +0100
@@ -25,6 +25,7 @@
 struct auth_cred {
 	uid_t	uid;
 	gid_t	gid;
+	tag_t	tag;
 	struct group_info *group_info;
 	unsigned char machine_cred : 1;
 };
diff -NurpP --minimal linux-2.6.32.17/include/linux/sunrpc/clnt.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sunrpc/clnt.h
--- linux-2.6.32.17/include/linux/sunrpc/clnt.h	2009-12-03 20:02:56.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sunrpc/clnt.h	2009-12-03 20:04:56.000000000 +0100
@@ -49,7 +49,8 @@ struct rpc_clnt {
 	unsigned int		cl_softrtry : 1,/* soft timeouts */
 				cl_discrtry : 1,/* disconnect before retry */
 				cl_autobind : 1,/* use getport() */
-				cl_chatty   : 1;/* be verbose */
+				cl_chatty   : 1,/* be verbose */
+				cl_tag      : 1;/* context tagging */
 
 	struct rpc_rtt *	cl_rtt;		/* RTO estimator data */
 	const struct rpc_timeout *cl_timeout;	/* Timeout strategy */
diff -NurpP --minimal linux-2.6.32.17/include/linux/syscalls.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/syscalls.h
--- linux-2.6.32.17/include/linux/syscalls.h	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/syscalls.h	2010-05-21 13:20:19.000000000 +0200
@@ -548,6 +548,8 @@ asmlinkage long sys_symlink(const char _
 asmlinkage long sys_unlink(const char __user *pathname);
 asmlinkage long sys_rename(const char __user *oldname,
 				const char __user *newname);
+asmlinkage long sys_copyfile(const char __user *from, const char __user *to,
+				umode_t mode);
 asmlinkage long sys_chmod(const char __user *filename, mode_t mode);
 asmlinkage long sys_fchmod(unsigned int fd, mode_t mode);
 
diff -NurpP --minimal linux-2.6.32.17/include/linux/sysctl.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sysctl.h
--- linux-2.6.32.17/include/linux/sysctl.h	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sysctl.h	2010-02-12 10:59:55.000000000 +0100
@@ -69,6 +69,7 @@ enum
 	CTL_ABI=9,		/* Binary emulation */
 	CTL_CPU=10,		/* CPU stuff (speed scaling, etc) */
 	CTL_ARLAN=254,		/* arlan wireless driver */
+	CTL_VSERVER=4242,	/* Linux-VServer debug */
 	CTL_S390DBF=5677,	/* s390 debug */
 	CTL_SUNRPC=7249,	/* sunrpc debug */
 	CTL_PM=9899,		/* frv power management */
@@ -103,6 +104,7 @@ enum
 
 	KERN_PANIC=15,		/* int: panic timeout */
 	KERN_REALROOTDEV=16,	/* real root device to mount after initrd */
+	KERN_VSHELPER=17,	/* string: path to vshelper policy agent */
 
 	KERN_SPARC_REBOOT=21,	/* reboot command on Sparc */
 	KERN_CTLALTDEL=22,	/* int: allow ctl-alt-del to reboot */
diff -NurpP --minimal linux-2.6.32.17/include/linux/sysfs.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sysfs.h
--- linux-2.6.32.17/include/linux/sysfs.h	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/sysfs.h	2009-12-03 20:04:56.000000000 +0100
@@ -17,6 +17,8 @@
 #include <linux/list.h>
 #include <asm/atomic.h>
 
+#define SYSFS_SUPER_MAGIC	0x62656572
+
 struct kobject;
 struct module;
 
diff -NurpP --minimal linux-2.6.32.17/include/linux/time.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/time.h
--- linux-2.6.32.17/include/linux/time.h	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/time.h	2010-02-12 10:59:55.000000000 +0100
@@ -238,6 +238,9 @@ static __always_inline void timespec_add
 	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
 	a->tv_nsec = ns;
 }
+
+#include <linux/vs_time.h>
+
 #endif /* __KERNEL__ */
 
 #define NFDBITS			__NFDBITS
diff -NurpP --minimal linux-2.6.32.17/include/linux/types.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/types.h
--- linux-2.6.32.17/include/linux/types.h	2009-09-10 15:26:26.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/types.h	2009-12-03 20:04:56.000000000 +0100
@@ -37,6 +37,9 @@ typedef __kernel_uid32_t	uid_t;
 typedef __kernel_gid32_t	gid_t;
 typedef __kernel_uid16_t        uid16_t;
 typedef __kernel_gid16_t        gid16_t;
+typedef unsigned int		xid_t;
+typedef unsigned int		nid_t;
+typedef unsigned int		tag_t;
 
 typedef unsigned long		uintptr_t;
 
diff -NurpP --minimal linux-2.6.32.17/include/linux/vroot.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vroot.h
--- linux-2.6.32.17/include/linux/vroot.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vroot.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,51 @@
+
+/*
+ * include/linux/vroot.h
+ *
+ * written by Herbert Pötzl, 9/11/2002
+ * ported to 2.6 by Herbert Pötzl, 30/12/2004
+ *
+ * Copyright (C) 2002-2007 by Herbert Pötzl.
+ * Redistribution of this file is permitted under the
+ * GNU General Public License.
+ */
+
+#ifndef _LINUX_VROOT_H
+#define _LINUX_VROOT_H
+
+
+#ifdef __KERNEL__
+
+/* Possible states of device */
+enum {
+	Vr_unbound,
+	Vr_bound,
+};
+
+struct vroot_device {
+	int		vr_number;
+	int		vr_refcnt;
+
+	struct semaphore	vr_ctl_mutex;
+	struct block_device    *vr_device;
+	int			vr_state;
+};
+
+
+typedef struct block_device *(vroot_grb_func)(struct block_device *);
+
+extern int register_vroot_grb(vroot_grb_func *);
+extern int unregister_vroot_grb(vroot_grb_func *);
+
+#endif /* __KERNEL__ */
+
+#define MAX_VROOT_DEFAULT	8
+
+/*
+ * IOCTL commands --- we will commandeer 0x56 ('V')
+ */
+
+#define VROOT_SET_DEV		0x5600
+#define VROOT_CLR_DEV		0x5601
+
+#endif /* _LINUX_VROOT_H */
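
The vroot device is configured from user space through the two 'V' ioctls declared above. A minimal user-space sketch of binding a backing block device to a vroot node; the argument convention (passing the backing device's file descriptor, as the loop driver does with LOOP_SET_FD) is an assumption here, since the driver itself is not part of this excerpt.

/* User-space sketch: bind a backing block device to /dev/vrootN. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VROOT_SET_DEV	0x5600		/* matches the header above */

static int bind_vroot(const char *vroot, const char *blkdev)
{
	int ret, vfd, bfd;

	vfd = open(vroot, O_RDONLY);
	bfd = open(blkdev, O_RDONLY);
	if (vfd < 0 || bfd < 0) {
		if (vfd >= 0)
			close(vfd);
		if (bfd >= 0)
			close(bfd);
		return -1;
	}

	ret = ioctl(vfd, VROOT_SET_DEV, bfd);	/* assumed: backing fd as arg */
	close(bfd);
	close(vfd);
	return ret;
}
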
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_base.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_base.h
--- linux-2.6.32.17/include/linux/vs_base.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_base.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,10 @@
+#ifndef _VS_BASE_H
+#define _VS_BASE_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+#else
+#warning duplicate inclusion
+#endif
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_context.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_context.h
--- linux-2.6.32.17/include/linux/vs_context.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_context.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,242 @@
+#ifndef _VS_CONTEXT_H
+#define _VS_CONTEXT_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/context.h"
+#include "vserver/history.h"
+#include "vserver/debug.h"
+
+#include <linux/sched.h>
+
+
+#define get_vx_info(i) __get_vx_info(i, __FILE__, __LINE__, __HERE__)
+
+static inline struct vx_info *__get_vx_info(struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	if (!vxi)
+		return NULL;
+
+	vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_get_vx_info(vxi, _here);
+
+	atomic_inc(&vxi->vx_usecnt);
+	return vxi;
+}
+
+
+extern void free_vx_info(struct vx_info *);
+
+#define put_vx_info(i) __put_vx_info(i, __FILE__, __LINE__, __HERE__)
+
+static inline void __put_vx_info(struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	if (!vxi)
+		return;
+
+	vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_put_vx_info(vxi, _here);
+
+	if (atomic_dec_and_test(&vxi->vx_usecnt))
+		free_vx_info(vxi);
+}
+
+
+#define init_vx_info(p, i) \
+	__init_vx_info(p, i, __FILE__, __LINE__, __HERE__)
+
+static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	if (vxi) {
+		vxlprintk(VXD_CBIT(xid, 3),
+			"init_vx_info(%p[#%d.%d])",
+			vxi, vxi ? vxi->vx_id : 0,
+			vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+			_file, _line);
+		__vxh_init_vx_info(vxi, vxp, _here);
+
+		atomic_inc(&vxi->vx_usecnt);
+	}
+	*vxp = vxi;
+}
+
+
+#define set_vx_info(p, i) \
+	__set_vx_info(p, i, __FILE__, __LINE__, __HERE__)
+
+static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	struct vx_info *vxo;
+
+	if (!vxi)
+		return;
+
+	vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d])",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_set_vx_info(vxi, vxp, _here);
+
+	atomic_inc(&vxi->vx_usecnt);
+	vxo = xchg(vxp, vxi);
+	BUG_ON(vxo);
+}
+
+
+#define clr_vx_info(p) __clr_vx_info(p, __FILE__, __LINE__, __HERE__)
+
+static inline void __clr_vx_info(struct vx_info **vxp,
+	const char *_file, int _line, void *_here)
+{
+	struct vx_info *vxo;
+
+	vxo = xchg(vxp, NULL);
+	if (!vxo)
+		return;
+
+	vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d])",
+		vxo, vxo ? vxo->vx_id : 0,
+		vxo ? atomic_read(&vxo->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_clr_vx_info(vxo, vxp, _here);
+
+	if (atomic_dec_and_test(&vxo->vx_usecnt))
+		free_vx_info(vxo);
+}
+
+
+#define claim_vx_info(v, p) \
+	__claim_vx_info(v, p, __FILE__, __LINE__, __HERE__)
+
+static inline void __claim_vx_info(struct vx_info *vxi,
+	struct task_struct *task,
+	const char *_file, int _line, void *_here)
+{
+	vxlprintk(VXD_CBIT(xid, 3), "claim_vx_info(%p[#%d.%d.%d]) %p",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		vxi ? atomic_read(&vxi->vx_tasks) : 0,
+		task, _file, _line);
+	__vxh_claim_vx_info(vxi, task, _here);
+
+	atomic_inc(&vxi->vx_tasks);
+}
+
+
+extern void unhash_vx_info(struct vx_info *);
+
+#define release_vx_info(v, p) \
+	__release_vx_info(v, p, __FILE__, __LINE__, __HERE__)
+
+static inline void __release_vx_info(struct vx_info *vxi,
+	struct task_struct *task,
+	const char *_file, int _line, void *_here)
+{
+	vxlprintk(VXD_CBIT(xid, 3), "release_vx_info(%p[#%d.%d.%d]) %p",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		vxi ? atomic_read(&vxi->vx_tasks) : 0,
+		task, _file, _line);
+	__vxh_release_vx_info(vxi, task, _here);
+
+	might_sleep();
+
+	if (atomic_dec_and_test(&vxi->vx_tasks))
+		unhash_vx_info(vxi);
+}
+
+
+#define task_get_vx_info(p) \
+	__task_get_vx_info(p, __FILE__, __LINE__, __HERE__)
+
+static inline struct vx_info *__task_get_vx_info(struct task_struct *p,
+	const char *_file, int _line, void *_here)
+{
+	struct vx_info *vxi;
+
+	task_lock(p);
+	vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)",
+		p, _file, _line);
+	vxi = __get_vx_info(p->vx_info, _file, _line, _here);
+	task_unlock(p);
+	return vxi;
+}
+
+
+static inline void __wakeup_vx_info(struct vx_info *vxi)
+{
+	if (waitqueue_active(&vxi->vx_wait))
+		wake_up_interruptible(&vxi->vx_wait);
+}
+
+
+#define enter_vx_info(v, s) __enter_vx_info(v, s, __FILE__, __LINE__)
+
+static inline void __enter_vx_info(struct vx_info *vxi,
+	struct vx_info_save *vxis, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(xid, 5), "enter_vx_info(%p[#%d],%p) %p[#%d,%p]",
+		vxi, vxi ? vxi->vx_id : 0, vxis, current,
+		current->xid, current->vx_info, _file, _line);
+	vxis->vxi = xchg(&current->vx_info, vxi);
+	vxis->xid = current->xid;
+	current->xid = vxi ? vxi->vx_id : 0;
+}
+
+#define leave_vx_info(s) __leave_vx_info(s, __FILE__, __LINE__)
+
+static inline void __leave_vx_info(struct vx_info_save *vxis,
+	const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(xid, 5), "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]",
+		vxis, vxis->xid, vxis->vxi, current,
+		current->xid, current->vx_info, _file, _line);
+	(void)xchg(&current->vx_info, vxis->vxi);
+	current->xid = vxis->xid;
+}
+
+
+static inline void __enter_vx_admin(struct vx_info_save *vxis)
+{
+	vxis->vxi = xchg(&current->vx_info, NULL);
+	vxis->xid = xchg(&current->xid, (xid_t)0);
+}
+
+static inline void __leave_vx_admin(struct vx_info_save *vxis)
+{
+	(void)xchg(&current->xid, vxis->xid);
+	(void)xchg(&current->vx_info, vxis->vxi);
+}
+
+#define task_is_init(p) \
+	__task_is_init(p, __FILE__, __LINE__, __HERE__)
+
+static inline int __task_is_init(struct task_struct *p,
+	const char *_file, int _line, void *_here)
+{
+	int is_init = is_global_init(p);
+
+	task_lock(p);
+	if (p->vx_info)
+		is_init = p->vx_info->vx_initpid == p->pid;
+	task_unlock(p);
+	return is_init;
+}
+
+extern void exit_vx_info(struct task_struct *, int);
+extern void exit_vx_info_early(struct task_struct *, int);
+
+
+#else
+#warning duplicate inclusion
+#endif
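
vx_info reference handling comes in two flavours: usage references (get/put) and task ownership (claim/release), each behind a macro that records the call site for the debug history. A sketch of the common usage-reference pattern, using only the helpers defined above.

/* Typical pattern: pin another task's context, inspect it, drop it.
 * task_get_vx_info() takes the reference under task_lock();
 * put_vx_info() may free the structure on the last drop. */
static void inspect_context(struct task_struct *p)
{
	struct vx_info *vxi = task_get_vx_info(p);

	if (!vxi)
		return;			/* task runs in the host context */
	/* ... read vxi->vx_id and per-context state here ... */
	put_vx_info(vxi);
}
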
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_cowbl.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_cowbl.h
--- linux-2.6.32.17/include/linux/vs_cowbl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_cowbl.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,47 @@
+#ifndef _VS_COWBL_H
+#define _VS_COWBL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+
+extern struct dentry *cow_break_link(const char *pathname);
+
+static inline int cow_check_and_break(struct path *path)
+{
+	struct inode *inode = path->dentry->d_inode;
+	int error = 0;
+
+	/* do we need this check? */
+	if (IS_RDONLY(inode))
+		return -EROFS;
+
+	if (IS_COW(inode)) {
+		if (IS_COW_LINK(inode)) {
+			struct dentry *new_dentry, *old_dentry = path->dentry;
+			char *pp, *buf;
+
+			buf = kmalloc(PATH_MAX, GFP_KERNEL);
+			if (!buf) {
+				return -ENOMEM;
+			}
+			pp = d_path(path, buf, PATH_MAX);
+			new_dentry = cow_break_link(pp);
+			kfree(buf);
+			if (!IS_ERR(new_dentry)) {
+				path->dentry = new_dentry;
+				dput(old_dentry);
+			} else
+				error = PTR_ERR(new_dentry);
+		} else {
+			inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE);
+			inode->i_ctime = CURRENT_TIME;
+			mark_inode_dirty(inode);
+		}
+	}
+	return error;
+}
+
+#else
+#warning duplicate inclusion
+#endif
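cow_check_and_break() above either clears the immutable/ixunlink flags on a plain COW inode or, for a COW link, swaps path->dentry for a freshly broken-out private copy. A hedged sketch of a call site before an operation that modifies the file (the surrounding variables are assumed to exist in the caller):

	struct path path;	/* already resolved by the caller */
	int error;

	error = cow_check_and_break(&path);
	if (error)
		return error;
	/* on success, path.dentry may now refer to the private copy */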
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_cvirt.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_cvirt.h
--- linux-2.6.32.17/include/linux/vs_cvirt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_cvirt.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,50 @@
+#ifndef _VS_CVIRT_H
+#define _VS_CVIRT_H
+
+#include "vserver/cvirt.h"
+#include "vserver/context.h"
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+
+static inline void vx_activate_task(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info)) {
+		vx_update_load(vxi);
+		atomic_inc(&vxi->cvirt.nr_running);
+	}
+}
+
+static inline void vx_deactivate_task(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info)) {
+		vx_update_load(vxi);
+		atomic_dec(&vxi->cvirt.nr_running);
+	}
+}
+
+static inline void vx_uninterruptible_inc(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info))
+		atomic_inc(&vxi->cvirt.nr_uninterruptible);
+}
+
+static inline void vx_uninterruptible_dec(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info))
+		atomic_dec(&vxi->cvirt.nr_uninterruptible);
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
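The four helpers above are intended to be called from the scheduler and wait paths so that cvirt.nr_running and cvirt.nr_uninterruptible mirror the host counters per guest; the hook placement sketched below is illustrative only:

	vx_activate_task(p);		/* task p becomes runnable */
	vx_deactivate_task(p);		/* task p leaves the runqueue */
	vx_uninterruptible_inc(p);	/* p enters TASK_UNINTERRUPTIBLE */
	vx_uninterruptible_dec(p);	/* p is woken up again */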
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_device.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_device.h
--- linux-2.6.32.17/include/linux/vs_device.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_device.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,45 @@
+#ifndef _VS_DEVICE_H
+#define _VS_DEVICE_H
+
+#include "vserver/base.h"
+#include "vserver/device.h"
+#include "vserver/debug.h"
+
+
+#ifdef CONFIG_VSERVER_DEVICE
+
+int vs_map_device(struct vx_info *, dev_t, dev_t *, umode_t);
+
+#define vs_device_perm(v, d, m, p) \
+	((vs_map_device(current_vx_info(), d, NULL, m) & (p)) == (p))
+
+#else
+
+static inline
+int vs_map_device(struct vx_info *vxi,
+	dev_t device, dev_t *target, umode_t mode)
+{
+	if (target)
+		*target = device;
+	return ~0;
+}
+
+#define vs_device_perm(v, d, m, p) ((p) == (p))
+
+#endif
+
+
+#define vs_map_chrdev(d, t, p) \
+	((vs_map_device(current_vx_info(), d, t, S_IFCHR) & (p)) == (p))
+#define vs_map_blkdev(d, t, p) \
+	((vs_map_device(current_vx_info(), d, t, S_IFBLK) & (p)) == (p))
+
+#define vs_chrdev_perm(d, p) \
+	vs_device_perm(current_vx_info(), d, S_IFCHR, p)
+#define vs_blkdev_perm(d, p) \
+	vs_device_perm(current_vx_info(), d, S_IFBLK, p)
+
+
+#else
+#warning duplicate inclusion
+#endif
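vs_map_chrdev()/vs_map_blkdev() above combine the device-mapping lookup with a permission test: the bits returned by vs_map_device() are masked against the requested DATTR_* value. A sketch of a caller gating a character-device open (the code around the macro is illustrative; DATTR_OPEN is defined in vserver/device.h further down):

	dev_t mapped;

	if (!vs_map_chrdev(dev, &mapped, DATTR_OPEN))
		return -EPERM;
	/* open 'mapped', which may differ from 'dev' if a remap target is set */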
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_dlimit.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_dlimit.h
--- linux-2.6.32.17/include/linux/vs_dlimit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_dlimit.h	2010-03-21 06:19:38.000000000 +0100
@@ -0,0 +1,215 @@
+#ifndef _VS_DLIMIT_H
+#define _VS_DLIMIT_H
+
+#include <linux/fs.h>
+
+#include "vserver/dlimit.h"
+#include "vserver/base.h"
+#include "vserver/debug.h"
+
+
+#define get_dl_info(i)	__get_dl_info(i, __FILE__, __LINE__)
+
+static inline struct dl_info *__get_dl_info(struct dl_info *dli,
+	const char *_file, int _line)
+{
+	if (!dli)
+		return NULL;
+	vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])",
+		dli, dli ? dli->dl_tag : 0,
+		dli ? atomic_read(&dli->dl_usecnt) : 0,
+		_file, _line);
+	atomic_inc(&dli->dl_usecnt);
+	return dli;
+}
+
+
+#define free_dl_info(i) \
+	call_rcu(&(i)->dl_rcu, rcu_free_dl_info)
+
+#define put_dl_info(i)	__put_dl_info(i, __FILE__, __LINE__)
+
+static inline void __put_dl_info(struct dl_info *dli,
+	const char *_file, int _line)
+{
+	if (!dli)
+		return;
+	vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])",
+		dli, dli ? dli->dl_tag : 0,
+		dli ? atomic_read(&dli->dl_usecnt) : 0,
+		_file, _line);
+	if (atomic_dec_and_test(&dli->dl_usecnt))
+		free_dl_info(dli);
+}
+
+
+#define __dlimit_char(d)	((d) ? '*' : ' ')
+
+static inline int __dl_alloc_space(struct super_block *sb,
+	tag_t tag, dlsize_t nr, const char *file, int line)
+{
+	struct dl_info *dli = NULL;
+	int ret = 0;
+
+	if (nr == 0)
+		goto out;
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	ret = (dli->dl_space_used + nr > dli->dl_space_total);
+	if (!ret)
+		dli->dl_space_used += nr;
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 1),
+		"ALLOC (%p,#%d)%c %lld bytes (%d)",
+		sb, tag, __dlimit_char(dli), (long long)nr,
+		ret, file, line);
+	return ret;
+}
+
+static inline void __dl_free_space(struct super_block *sb,
+	tag_t tag, dlsize_t nr, const char *_file, int _line)
+{
+	struct dl_info *dli = NULL;
+
+	if (nr == 0)
+		goto out;
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	if (dli->dl_space_used > nr)
+		dli->dl_space_used -= nr;
+	else
+		dli->dl_space_used = 0;
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 1),
+		"FREE  (%p,#%d)%c %lld bytes",
+		sb, tag, __dlimit_char(dli), (long long)nr,
+		_file, _line);
+}
+
+static inline int __dl_alloc_inode(struct super_block *sb,
+	tag_t tag, const char *_file, int _line)
+{
+	struct dl_info *dli;
+	int ret = 0;
+
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	dli->dl_inodes_used++;
+	ret = (dli->dl_inodes_used > dli->dl_inodes_total);
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 0),
+		"ALLOC (%p,#%d)%c inode (%d)",
+		sb, tag, __dlimit_char(dli), ret, _file, _line);
+	return ret;
+}
+
+static inline void __dl_free_inode(struct super_block *sb,
+	tag_t tag, const char *_file, int _line)
+{
+	struct dl_info *dli;
+
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	if (dli->dl_inodes_used > 1)
+		dli->dl_inodes_used--;
+	else
+		dli->dl_inodes_used = 0;
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 0),
+		"FREE  (%p,#%d)%c inode",
+		sb, tag, __dlimit_char(dli), _file, _line);
+}
+
+static inline void __dl_adjust_block(struct super_block *sb, tag_t tag,
+	unsigned long long *free_blocks, unsigned long long *root_blocks,
+	const char *_file, int _line)
+{
+	struct dl_info *dli;
+	uint64_t broot, bfree;
+
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		return;
+
+	spin_lock(&dli->dl_lock);
+	broot = (dli->dl_space_total -
+		(dli->dl_space_total >> 10) * dli->dl_nrlmult)
+		>> sb->s_blocksize_bits;
+	bfree = (dli->dl_space_total - dli->dl_space_used)
+			>> sb->s_blocksize_bits;
+	spin_unlock(&dli->dl_lock);
+
+	vxlprintk(VXD_CBIT(dlim, 2),
+		"ADJUST: %lld,%lld on %lld,%lld [mult=%d]",
+		(long long)bfree, (long long)broot,
+		*free_blocks, *root_blocks, dli->dl_nrlmult,
+		_file, _line);
+	if (free_blocks) {
+		if (*free_blocks > bfree)
+			*free_blocks = bfree;
+	}
+	if (root_blocks) {
+		if (*root_blocks > broot)
+			*root_blocks = broot;
+	}
+	put_dl_info(dli);
+}
+
+#define dl_prealloc_space(in, bytes) \
+	__dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_alloc_space(in, bytes) \
+	__dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_reserve_space(in, bytes) \
+	__dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_claim_space(in, bytes) (0)
+
+#define dl_release_space(in, bytes) \
+	__dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_free_space(in, bytes) \
+	__dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+
+
+#define dl_alloc_inode(in) \
+	__dl_alloc_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
+
+#define dl_free_inode(in) \
+	__dl_free_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
+
+
+#define dl_adjust_block(sb, tag, fb, rb) \
+	__dl_adjust_block(sb, tag, fb, rb, __FILE__, __LINE__ )
+
+
+#else
+#warning duplicate inclusion
+#endif
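__dl_alloc_space() returns non-zero when the tagged context would exceed its space limit, and the dl_*_space()/dl_*_inode() wrappers charge or uncharge by the inode's tag. A sketch of the intended pairing in a filesystem write path (the error handling around it is hypothetical):

	if (dl_alloc_space(inode, bytes))	/* non-zero: over the limit */
		return -ENOSPC;
	/* ... allocate the blocks ... */
	if (alloc_failed)
		dl_free_space(inode, bytes);	/* roll the charge back */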
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/base.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/base.h
--- linux-2.6.32.17/include/linux/vserver/base.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/base.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,170 @@
+#ifndef _VX_BASE_H
+#define _VX_BASE_H
+
+
+/* context state changes */
+
+enum {
+	VSC_STARTUP = 1,
+	VSC_SHUTDOWN,
+
+	VSC_NETUP,
+	VSC_NETDOWN,
+};
+
+
+
+#define vx_task_xid(t)	((t)->xid)
+
+#define vx_current_xid() vx_task_xid(current)
+
+#define current_vx_info() (current->vx_info)
+
+
+#define nx_task_nid(t)	((t)->nid)
+
+#define nx_current_nid() nx_task_nid(current)
+
+#define current_nx_info() (current->nx_info)
+
+
+/* generic flag merging */
+
+#define vs_check_flags(v, m, f)	(((v) & (m)) ^ (f))
+
+#define vs_mask_flags(v, f, m)	(((v) & ~(m)) | ((f) & (m)))
+
+#define vs_mask_mask(v, f, m)	(((v) & ~(m)) | ((v) & (f) & (m)))
+
+#define vs_check_bit(v, n)	((v) & (1LL << (n)))
+
+
+/* context flags */
+
+#define __vx_flags(v)	((v) ? (v)->vx_flags : 0)
+
+#define vx_current_flags()	__vx_flags(current_vx_info())
+
+#define vx_info_flags(v, m, f) \
+	vs_check_flags(__vx_flags(v), m, f)
+
+#define task_vx_flags(t, m, f) \
+	((t) && vx_info_flags((t)->vx_info, m, f))
+
+#define vx_flags(m, f)	vx_info_flags(current_vx_info(), m, f)
+
+
+/* context caps */
+
+#define __vx_ccaps(v)	((v) ? (v)->vx_ccaps : 0)
+
+#define vx_current_ccaps()	__vx_ccaps(current_vx_info())
+
+#define vx_info_ccaps(v, c)	(__vx_ccaps(v) & (c))
+
+#define vx_ccaps(c)	vx_info_ccaps(current_vx_info(), (c))
+
+
+
+/* network flags */
+
+#define __nx_flags(n)	((n) ? (n)->nx_flags : 0)
+
+#define nx_current_flags()	__nx_flags(current_nx_info())
+
+#define nx_info_flags(n, m, f) \
+	vs_check_flags(__nx_flags(n), m, f)
+
+#define task_nx_flags(t, m, f) \
+	((t) && nx_info_flags((t)->nx_info, m, f))
+
+#define nx_flags(m, f)	nx_info_flags(current_nx_info(), m, f)
+
+
+/* network caps */
+
+#define __nx_ncaps(n)	((n) ? (n)->nx_ncaps : 0)
+
+#define nx_current_ncaps()	__nx_ncaps(current_nx_info())
+
+#define nx_info_ncaps(n, c)	(__nx_ncaps(n) & (c))
+
+#define nx_ncaps(c)	nx_info_ncaps(current_nx_info(), c)
+
+
+/* context mask capabilities */
+
+#define __vx_mcaps(v)	((v) ? (v)->vx_ccaps >> 32UL : ~0 )
+
+#define vx_info_mcaps(v, c)	(__vx_mcaps(v) & (c))
+
+#define vx_mcaps(c)	vx_info_mcaps(current_vx_info(), c)
+
+
+/* context bcap mask */
+
+#define __vx_bcaps(v)		((v)->vx_bcaps)
+
+#define vx_current_bcaps()	__vx_bcaps(current_vx_info())
+
+
+/* mask given bcaps */
+
+#define vx_info_mbcaps(v, c)	((v) ? cap_intersect(__vx_bcaps(v), c) : c)
+
+#define vx_mbcaps(c)		vx_info_mbcaps(current_vx_info(), c)
+
+
+/* masked cap_bset */
+
+#define vx_info_cap_bset(v)	vx_info_mbcaps(v, current->cap_bset)
+
+#define vx_current_cap_bset()	vx_info_cap_bset(current_vx_info())
+
+#if 0
+#define vx_info_mbcap(v, b) \
+	(!vx_info_flags(v, VXF_STATE_SETUP, 0) ? \
+	vx_info_bcaps(v, b) : (b))
+
+#define task_vx_mbcap(t, b) \
+	vx_info_mbcap((t)->vx_info, (t)->b)
+
+#define vx_mbcap(b)	task_vx_mbcap(current, b)
+#endif
+
+#define vx_cap_raised(v, c, f)	cap_raised(vx_info_mbcaps(v, c), f)
+
+#define vx_capable(b, c) (capable(b) || \
+	(cap_raised(current_cap(), b) && vx_ccaps(c)))
+
+#define nx_capable(b, c) (capable(b) || \
+	(cap_raised(current_cap(), b) && nx_ncaps(c)))
+
+#define vx_task_initpid(t, n) \
+	((t)->vx_info && \
+	((t)->vx_info->vx_initpid == (n)))
+
+#define vx_current_initpid(n)	vx_task_initpid(current, n)
+
+
+/* context unshare mask */
+
+#define __vx_umask(v)		((v)->vx_umask)
+
+#define vx_current_umask()	__vx_umask(current_vx_info())
+
+#define vx_can_unshare(b, f) (capable(b) || \
+	(cap_raised(current_cap(), b) && \
+	!((f) & ~vx_current_umask())))
+
+
+#define __vx_state(v)	((v) ? ((v)->vx_state) : 0)
+
+#define vx_info_state(v, m)	(__vx_state(v) & (m))
+
+
+#define __nx_state(n)	((n) ? ((n)->nx_state) : 0)
+
+#define nx_info_state(n, m)	(__nx_state(n) & (m))
+
+#endif
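vx_capable()/nx_capable() above grant an operation either through the real capability or through a capability raised in the task's set combined with the matching context capability. Illustrative check (the snippet is a sketch; VXC_SECURE_MOUNT is defined in vserver/context.h below):

	/* allow a guest (re)mount only with the SECURE_MOUNT context cap */
	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
		return -EPERM;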
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/cacct_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct_cmd.h
--- linux-2.6.32.17/include/linux/vserver/cacct_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,23 @@
+#ifndef _VX_CACCT_CMD_H
+#define _VX_CACCT_CMD_H
+
+
+/* virtual host info name commands */
+
+#define VCMD_sock_stat		VC_CMD(VSTAT, 5, 0)
+
+struct	vcmd_sock_stat_v0 {
+	uint32_t field;
+	uint32_t count[3];
+	uint64_t total[3];
+};
+
+
+#ifdef	__KERNEL__
+
+#include <linux/compiler.h>
+
+extern int vc_sock_stat(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_CACCT_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/cacct_def.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct_def.h
--- linux-2.6.32.17/include/linux/vserver/cacct_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct_def.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,43 @@
+#ifndef _VX_CACCT_DEF_H
+#define _VX_CACCT_DEF_H
+
+#include <asm/atomic.h>
+#include <linux/vserver/cacct.h>
+
+
+struct _vx_sock_acc {
+	atomic_long_t count;
+	atomic_long_t total;
+};
+
+/* context sub struct */
+
+struct _vx_cacct {
+	struct _vx_sock_acc sock[VXA_SOCK_SIZE][3];
+	atomic_t slab[8];
+	atomic_t page[6][8];
+};
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_cacct(struct _vx_cacct *cacct)
+{
+	int i, j;
+
+	printk("\t_vx_cacct:");
+	for (i = 0; i < 6; i++) {
+		struct _vx_sock_acc *ptr = cacct->sock[i];
+
+		printk("\t [%d] =", i);
+		for (j = 0; j < 3; j++) {
+			printk(" [%d] = %8lu, %8lu", j,
+				atomic_long_read(&ptr[j].count),
+				atomic_long_read(&ptr[j].total));
+		}
+		printk("\n");
+	}
+}
+
+#endif
+
+#endif	/* _VX_CACCT_DEF_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/cacct.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct.h
--- linux-2.6.32.17/include/linux/vserver/cacct.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,15 @@
+#ifndef _VX_CACCT_H
+#define _VX_CACCT_H
+
+
+enum sock_acc_field {
+	VXA_SOCK_UNSPEC = 0,
+	VXA_SOCK_UNIX,
+	VXA_SOCK_INET,
+	VXA_SOCK_INET6,
+	VXA_SOCK_PACKET,
+	VXA_SOCK_OTHER,
+	VXA_SOCK_SIZE	/* array size */
+};
+
+#endif	/* _VX_CACCT_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/cacct_int.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct_int.h
--- linux-2.6.32.17/include/linux/vserver/cacct_int.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cacct_int.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,21 @@
+#ifndef _VX_CACCT_INT_H
+#define _VX_CACCT_INT_H
+
+
+#ifdef	__KERNEL__
+
+static inline
+unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos)
+{
+	return atomic_long_read(&cacct->sock[type][pos].count);
+}
+
+
+static inline
+unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos)
+{
+	return atomic_long_read(&cacct->sock[type][pos].total);
+}
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_CACCT_INT_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/check.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/check.h
--- linux-2.6.32.17/include/linux/vserver/check.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/check.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,89 @@
+#ifndef _VS_CHECK_H
+#define _VS_CHECK_H
+
+
+#define MAX_S_CONTEXT	65535	/* Arbitrary limit */
+
+#ifdef	CONFIG_VSERVER_DYNAMIC_IDS
+#define MIN_D_CONTEXT	49152	/* dynamic contexts start here */
+#else
+#define MIN_D_CONTEXT	65536
+#endif
+
+/* check conditions */
+
+#define VS_ADMIN	0x0001
+#define VS_WATCH	0x0002
+#define VS_HIDE		0x0004
+#define VS_HOSTID	0x0008
+
+#define VS_IDENT	0x0010
+#define VS_EQUIV	0x0020
+#define VS_PARENT	0x0040
+#define VS_CHILD	0x0080
+
+#define VS_ARG_MASK	0x00F0
+
+#define VS_DYNAMIC	0x0100
+#define VS_STATIC	0x0200
+
+#define VS_ATR_MASK	0x0F00
+
+#ifdef	CONFIG_VSERVER_PRIVACY
+#define VS_ADMIN_P	(0)
+#define VS_WATCH_P	(0)
+#else
+#define VS_ADMIN_P	VS_ADMIN
+#define VS_WATCH_P	VS_WATCH
+#endif
+
+#define VS_HARDIRQ	0x1000
+#define VS_SOFTIRQ	0x2000
+#define VS_IRQ		0x4000
+
+#define VS_IRQ_MASK	0xF000
+
+#include <linux/hardirq.h>
+
+/*
+ * check current context for ADMIN/WATCH and
+ * optionally against supplied argument
+ */
+static inline int __vs_check(int cid, int id, unsigned int mode)
+{
+	if (mode & VS_ARG_MASK) {
+		if ((mode & VS_IDENT) && (id == cid))
+			return 1;
+	}
+	if (mode & VS_ATR_MASK) {
+		if ((mode & VS_DYNAMIC) &&
+			(id >= MIN_D_CONTEXT) &&
+			(id <= MAX_S_CONTEXT))
+			return 1;
+		if ((mode & VS_STATIC) &&
+			(id > 1) && (id < MIN_D_CONTEXT))
+			return 1;
+	}
+	if (mode & VS_IRQ_MASK) {
+		if ((mode & VS_IRQ) && unlikely(in_interrupt()))
+			return 1;
+		if ((mode & VS_HARDIRQ) && unlikely(in_irq()))
+			return 1;
+		if ((mode & VS_SOFTIRQ) && unlikely(in_softirq()))
+			return 1;
+	}
+	return (((mode & VS_ADMIN) && (cid == 0)) ||
+		((mode & VS_WATCH) && (cid == 1)) ||
+		((mode & VS_HOSTID) && (id == 0)));
+}
+
+#define vx_check(c, m)	__vs_check(vx_current_xid(), c, (m) | VS_IRQ)
+
+#define vx_weak_check(c, m)	((m) ? vx_check(c, m) : 1)
+
+
+#define nx_check(c, m)	__vs_check(nx_current_nid(), c, m)
+
+#define nx_weak_check(c, m)	((m) ? nx_check(c, m) : 1)
+
+#endif
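vx_check() above compares the caller's xid against a target context and the special ADMIN/WATCH contexts (0 and 1). The typical visibility test used for /proc-style listings looks like this (the snippet itself is illustrative):

	/* show task p only to its own context or to the host/watch context */
	if (!vx_check(vx_task_xid(p), VS_WATCH_P | VS_IDENT))
		return 0;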
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/context_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/context_cmd.h
--- linux-2.6.32.17/include/linux/vserver/context_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/context_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,145 @@
+#ifndef _VX_CONTEXT_CMD_H
+#define _VX_CONTEXT_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_xid		VC_CMD(VINFO, 1, 0)
+
+#ifdef	__KERNEL__
+extern int vc_task_xid(uint32_t);
+
+#endif	/* __KERNEL__ */
+
+#define VCMD_vx_info		VC_CMD(VINFO, 5, 0)
+
+struct	vcmd_vx_info_v0 {
+	uint32_t xid;
+	uint32_t initpid;
+	/* more to come */
+};
+
+#ifdef	__KERNEL__
+extern int vc_vx_info(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+#define VCMD_ctx_stat		VC_CMD(VSTAT, 0, 0)
+
+struct	vcmd_ctx_stat_v0 {
+	uint32_t usecnt;
+	uint32_t tasks;
+	/* more to come */
+};
+
+#ifdef	__KERNEL__
+extern int vc_ctx_stat(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+/* context commands */
+
+#define VCMD_ctx_create_v0	VC_CMD(VPROC, 1, 0)
+#define VCMD_ctx_create		VC_CMD(VPROC, 1, 1)
+
+struct	vcmd_ctx_create {
+	uint64_t flagword;
+};
+
+#define VCMD_ctx_migrate_v0	VC_CMD(PROCMIG, 1, 0)
+#define VCMD_ctx_migrate	VC_CMD(PROCMIG, 1, 1)
+
+struct	vcmd_ctx_migrate {
+	uint64_t flagword;
+};
+
+#ifdef	__KERNEL__
+extern int vc_ctx_create(uint32_t, void __user *);
+extern int vc_ctx_migrate(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+
+/* flag commands */
+
+#define VCMD_get_cflags		VC_CMD(FLAGS, 1, 0)
+#define VCMD_set_cflags		VC_CMD(FLAGS, 2, 0)
+
+struct	vcmd_ctx_flags_v0 {
+	uint64_t flagword;
+	uint64_t mask;
+};
+
+#ifdef	__KERNEL__
+extern int vc_get_cflags(struct vx_info *, void __user *);
+extern int vc_set_cflags(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+
+/* context caps commands */
+
+#define VCMD_get_ccaps		VC_CMD(FLAGS, 3, 1)
+#define VCMD_set_ccaps		VC_CMD(FLAGS, 4, 1)
+
+struct	vcmd_ctx_caps_v1 {
+	uint64_t ccaps;
+	uint64_t cmask;
+};
+
+#ifdef	__KERNEL__
+extern int vc_get_ccaps(struct vx_info *, void __user *);
+extern int vc_set_ccaps(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+
+/* bcaps commands */
+
+#define VCMD_get_bcaps		VC_CMD(FLAGS, 9, 0)
+#define VCMD_set_bcaps		VC_CMD(FLAGS, 10, 0)
+
+struct	vcmd_bcaps {
+	uint64_t bcaps;
+	uint64_t bmask;
+};
+
+#ifdef	__KERNEL__
+extern int vc_get_bcaps(struct vx_info *, void __user *);
+extern int vc_set_bcaps(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+
+/* umask commands */
+
+#define VCMD_get_umask		VC_CMD(FLAGS, 13, 0)
+#define VCMD_set_umask		VC_CMD(FLAGS, 14, 0)
+
+struct	vcmd_umask {
+	uint64_t umask;
+	uint64_t mask;
+};
+
+#ifdef	__KERNEL__
+extern int vc_get_umask(struct vx_info *, void __user *);
+extern int vc_set_umask(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+
+/* OOM badness */
+
+#define VCMD_get_badness	VC_CMD(MEMCTRL, 5, 0)
+#define VCMD_set_badness	VC_CMD(MEMCTRL, 6, 0)
+
+struct	vcmd_badness_v0 {
+	int64_t bias;
+};
+
+#ifdef	__KERNEL__
+extern int vc_get_badness(struct vx_info *, void __user *);
+extern int vc_set_badness(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_CONTEXT_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/context.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/context.h
--- linux-2.6.32.17/include/linux/vserver/context.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/context.h	2010-03-13 21:50:00.000000000 +0100
@@ -0,0 +1,184 @@
+#ifndef _VX_CONTEXT_H
+#define _VX_CONTEXT_H
+
+#include <linux/types.h>
+#include <linux/capability.h>
+
+
+/* context flags */
+
+#define VXF_INFO_SCHED		0x00000002
+#define VXF_INFO_NPROC		0x00000004
+#define VXF_INFO_PRIVATE	0x00000008
+
+#define VXF_INFO_INIT		0x00000010
+#define VXF_INFO_HIDE		0x00000020
+#define VXF_INFO_ULIMIT		0x00000040
+#define VXF_INFO_NSPACE		0x00000080
+
+#define VXF_SCHED_HARD		0x00000100
+#define VXF_SCHED_PRIO		0x00000200
+#define VXF_SCHED_PAUSE		0x00000400
+
+#define VXF_VIRT_MEM		0x00010000
+#define VXF_VIRT_UPTIME		0x00020000
+#define VXF_VIRT_CPU		0x00040000
+#define VXF_VIRT_LOAD		0x00080000
+#define VXF_VIRT_TIME		0x00100000
+
+#define VXF_HIDE_MOUNT		0x01000000
+/* was	VXF_HIDE_NETIF		0x02000000 */
+#define VXF_HIDE_VINFO		0x04000000
+
+#define VXF_STATE_SETUP		(1ULL << 32)
+#define VXF_STATE_INIT		(1ULL << 33)
+#define VXF_STATE_ADMIN		(1ULL << 34)
+
+#define VXF_SC_HELPER		(1ULL << 36)
+#define VXF_REBOOT_KILL		(1ULL << 37)
+#define VXF_PERSISTENT		(1ULL << 38)
+
+#define VXF_FORK_RSS		(1ULL << 48)
+#define VXF_PROLIFIC		(1ULL << 49)
+
+#define VXF_IGNEG_NICE		(1ULL << 52)
+
+#define VXF_ONE_TIME		(0x0007ULL << 32)
+
+#define VXF_INIT_SET		(VXF_STATE_SETUP | VXF_STATE_INIT | VXF_STATE_ADMIN)
+
+
+/* context migration */
+
+#define VXM_SET_INIT		0x00000001
+#define VXM_SET_REAPER		0x00000002
+
+/* context caps */
+
+#define VXC_CAP_MASK		0x00000000
+
+#define VXC_SET_UTSNAME		0x00000001
+#define VXC_SET_RLIMIT		0x00000002
+#define VXC_FS_SECURITY		0x00000004
+#define VXC_FS_TRUSTED		0x00000008
+#define VXC_TIOCSTI		0x00000010
+
+/* was	VXC_RAW_ICMP		0x00000100 */
+#define VXC_SYSLOG		0x00001000
+#define VXC_OOM_ADJUST		0x00002000
+#define VXC_AUDIT_CONTROL	0x00004000
+
+#define VXC_SECURE_MOUNT	0x00010000
+#define VXC_SECURE_REMOUNT	0x00020000
+#define VXC_BINARY_MOUNT	0x00040000
+
+#define VXC_QUOTA_CTL		0x00100000
+#define VXC_ADMIN_MAPPER	0x00200000
+#define VXC_ADMIN_CLOOP		0x00400000
+
+#define VXC_KTHREAD		0x01000000
+#define VXC_NAMESPACE		0x02000000
+
+
+#ifdef	__KERNEL__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+
+#include "limit_def.h"
+#include "sched_def.h"
+#include "cvirt_def.h"
+#include "cacct_def.h"
+#include "device_def.h"
+
+#define VX_SPACES	2
+
+struct _vx_info_pc {
+	struct _vx_sched_pc sched_pc;
+	struct _vx_cvirt_pc cvirt_pc;
+};
+
+struct vx_info {
+	struct hlist_node vx_hlist;		/* linked list of contexts */
+	xid_t vx_id;				/* context id */
+	atomic_t vx_usecnt;			/* usage count */
+	atomic_t vx_tasks;			/* tasks count */
+	struct vx_info *vx_parent;		/* parent context */
+	int vx_state;				/* context state */
+
+	unsigned long vx_nsmask[VX_SPACES];	/* assignment mask */
+	struct nsproxy *vx_nsproxy[VX_SPACES];	/* private namespaces */
+	struct fs_struct *vx_fs[VX_SPACES];	/* private namespace fs */
+
+	uint64_t vx_flags;			/* context flags */
+	uint64_t vx_ccaps;			/* context caps (vserver) */
+	kernel_cap_t vx_bcaps;			/* bounding caps (system) */
+	unsigned long vx_umask;			/* unshare mask (guest) */
+
+	struct task_struct *vx_reaper;		/* guest reaper process */
+	pid_t vx_initpid;			/* PID of guest init */
+	int64_t vx_badness_bias;		/* OOM points bias */
+
+	struct _vx_limit limit;			/* vserver limits */
+	struct _vx_sched sched;			/* vserver scheduler */
+	struct _vx_cvirt cvirt;			/* virtual/bias stuff */
+	struct _vx_cacct cacct;			/* context accounting */
+
+	struct _vx_device dmap;			/* default device map targets */
+
+#ifndef CONFIG_SMP
+	struct _vx_info_pc info_pc;		/* per cpu data */
+#else
+	struct _vx_info_pc *ptr_pc;		/* per cpu array */
+#endif
+
+	wait_queue_head_t vx_wait;		/* context exit waitqueue */
+	int reboot_cmd;				/* last sys_reboot() cmd */
+	int exit_code;				/* last process exit code */
+
+	char vx_name[65];			/* vserver name */
+};
+
+#ifndef CONFIG_SMP
+#define	vx_ptr_pc(vxi)		(&(vxi)->info_pc)
+#define	vx_per_cpu(vxi, v, id)	vx_ptr_pc(vxi)->v
+#else
+#define	vx_ptr_pc(vxi)		((vxi)->ptr_pc)
+#define	vx_per_cpu(vxi, v, id)	per_cpu_ptr(vx_ptr_pc(vxi), id)->v
+#endif
+
+#define	vx_cpu(vxi, v)		vx_per_cpu(vxi, v, smp_processor_id())
+
+
+struct vx_info_save {
+	struct vx_info *vxi;
+	xid_t xid;
+};
+
+
+/* status flags */
+
+#define VXS_HASHED	0x0001
+#define VXS_PAUSED	0x0010
+#define VXS_SHUTDOWN	0x0100
+#define VXS_HELPER	0x1000
+#define VXS_RELEASED	0x8000
+
+
+extern void claim_vx_info(struct vx_info *, struct task_struct *);
+extern void release_vx_info(struct vx_info *, struct task_struct *);
+
+extern struct vx_info *lookup_vx_info(int);
+extern struct vx_info *lookup_or_create_vx_info(int);
+
+extern int get_xid_list(int, unsigned int *, int);
+extern int xid_is_hashed(xid_t);
+
+extern int vx_migrate_task(struct task_struct *, struct vx_info *, int);
+
+extern long vs_state_change(struct vx_info *, unsigned int);
+
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_CONTEXT_H */
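The vx_ptr_pc()/vx_per_cpu()/vx_cpu() accessors above hide whether the per-CPU data sits inline in struct vx_info (UP) or behind per_cpu_ptr() (SMP), so reads look the same in both configurations, e.g. (field choice illustrative):

	/* user time accumulated by this context on the current CPU */
	uint64_t user = vx_cpu(vxi, cvirt_pc.cpustat.user);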
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/cvirt_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cvirt_cmd.h
--- linux-2.6.32.17/include/linux/vserver/cvirt_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cvirt_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,53 @@
+#ifndef _VX_CVIRT_CMD_H
+#define _VX_CVIRT_CMD_H
+
+
+/* virtual host info name commands */
+
+#define VCMD_set_vhi_name	VC_CMD(VHOST, 1, 0)
+#define VCMD_get_vhi_name	VC_CMD(VHOST, 2, 0)
+
+struct	vcmd_vhi_name_v0 {
+	uint32_t field;
+	char name[65];
+};
+
+
+enum vhi_name_field {
+	VHIN_CONTEXT = 0,
+	VHIN_SYSNAME,
+	VHIN_NODENAME,
+	VHIN_RELEASE,
+	VHIN_VERSION,
+	VHIN_MACHINE,
+	VHIN_DOMAINNAME,
+};
+
+
+#ifdef	__KERNEL__
+
+#include <linux/compiler.h>
+
+extern int vc_set_vhi_name(struct vx_info *, void __user *);
+extern int vc_get_vhi_name(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+#define VCMD_virt_stat		VC_CMD(VSTAT, 3, 0)
+
+struct	vcmd_virt_stat_v0 {
+	uint64_t offset;
+	uint64_t uptime;
+	uint32_t nr_threads;
+	uint32_t nr_running;
+	uint32_t nr_uninterruptible;
+	uint32_t nr_onhold;
+	uint32_t nr_forks;
+	uint32_t load[3];
+};
+
+#ifdef	__KERNEL__
+extern int vc_virt_stat(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_CVIRT_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/cvirt_def.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cvirt_def.h
--- linux-2.6.32.17/include/linux/vserver/cvirt_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cvirt_def.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,80 @@
+#ifndef _VX_CVIRT_DEF_H
+#define _VX_CVIRT_DEF_H
+
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <asm/atomic.h>
+
+
+struct _vx_usage_stat {
+	uint64_t user;
+	uint64_t nice;
+	uint64_t system;
+	uint64_t softirq;
+	uint64_t irq;
+	uint64_t idle;
+	uint64_t iowait;
+};
+
+struct _vx_syslog {
+	wait_queue_head_t log_wait;
+	spinlock_t logbuf_lock;		/* lock for the log buffer */
+
+	unsigned long log_start;	/* next char to be read by syslog() */
+	unsigned long con_start;	/* next char to be sent to consoles */
+	unsigned long log_end;	/* most-recently-written-char + 1 */
+	unsigned long logged_chars;	/* #chars since last read+clear operation */
+
+	char log_buf[1024];
+};
+
+
+/* context sub struct */
+
+struct _vx_cvirt {
+	atomic_t nr_threads;		/* number of current threads */
+	atomic_t nr_running;		/* number of running threads */
+	atomic_t nr_uninterruptible;	/* number of uninterruptible threads */
+
+	atomic_t nr_onhold;		/* processes on hold */
+	uint32_t onhold_last;		/* jiffies when put on hold */
+
+	struct timeval bias_tv;		/* time offset to the host */
+	struct timespec bias_idle;
+	struct timespec bias_uptime;	/* context creation point */
+	uint64_t bias_clock;		/* offset in clock_t */
+
+	spinlock_t load_lock;		/* lock for the load averages */
+	atomic_t load_updates;		/* nr of load updates done so far */
+	uint32_t load_last;		/* last time load was calculated */
+	uint32_t load[3];		/* load averages 1,5,15 */
+
+	atomic_t total_forks;		/* number of forks so far */
+
+	struct _vx_syslog syslog;
+};
+
+struct _vx_cvirt_pc {
+	struct _vx_usage_stat cpustat;
+};
+
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt)
+{
+	printk("\t_vx_cvirt:\n");
+	printk("\t threads: %4d, %4d, %4d, %4d\n",
+		atomic_read(&cvirt->nr_threads),
+		atomic_read(&cvirt->nr_running),
+		atomic_read(&cvirt->nr_uninterruptible),
+		atomic_read(&cvirt->nr_onhold));
+	/* add rest here */
+	printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks));
+}
+
+#endif
+
+#endif	/* _VX_CVIRT_DEF_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/cvirt.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cvirt.h
--- linux-2.6.32.17/include/linux/vserver/cvirt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/cvirt.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,20 @@
+#ifndef _VX_CVIRT_H
+#define _VX_CVIRT_H
+
+
+#ifdef	__KERNEL__
+
+struct timespec;
+
+void vx_vsi_uptime(struct timespec *, struct timespec *);
+
+
+struct vx_info;
+
+void vx_update_load(struct vx_info *);
+
+
+int vx_do_syslog(int, char __user *, int);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_CVIRT_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/debug_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/debug_cmd.h
--- linux-2.6.32.17/include/linux/vserver/debug_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/debug_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,58 @@
+#ifndef _VX_DEBUG_CMD_H
+#define _VX_DEBUG_CMD_H
+
+
+/* debug commands */
+
+#define VCMD_dump_history	VC_CMD(DEBUG, 1, 0)
+
+#define VCMD_read_history	VC_CMD(DEBUG, 5, 0)
+#define VCMD_read_monitor	VC_CMD(DEBUG, 6, 0)
+
+struct  vcmd_read_history_v0 {
+	uint32_t index;
+	uint32_t count;
+	char __user *data;
+};
+
+struct  vcmd_read_monitor_v0 {
+	uint32_t index;
+	uint32_t count;
+	char __user *data;
+};
+
+
+#ifdef	__KERNEL__
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_read_history_v0_x32 {
+	uint32_t index;
+	uint32_t count;
+	compat_uptr_t data_ptr;
+};
+
+struct	vcmd_read_monitor_v0_x32 {
+	uint32_t index;
+	uint32_t count;
+	compat_uptr_t data_ptr;
+};
+
+#endif  /* CONFIG_COMPAT */
+
+extern int vc_dump_history(uint32_t);
+
+extern int vc_read_history(uint32_t, void __user *);
+extern int vc_read_monitor(uint32_t, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_read_history_x32(uint32_t, void __user *);
+extern int vc_read_monitor_x32(uint32_t, void __user *);
+
+#endif  /* CONFIG_COMPAT */
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_DEBUG_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/debug.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/debug.h
--- linux-2.6.32.17/include/linux/vserver/debug.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/debug.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,127 @@
+#ifndef _VX_DEBUG_H
+#define _VX_DEBUG_H
+
+
+#define VXD_CBIT(n, m)	(vx_debug_ ## n & (1 << (m)))
+#define VXD_CMIN(n, m)	(vx_debug_ ## n > (m))
+#define VXD_MASK(n, m)	(vx_debug_ ## n & (m))
+
+#define VXD_DEV(d)	(d), (d)->bd_inode->i_ino,		\
+			imajor((d)->bd_inode), iminor((d)->bd_inode)
+#define VXF_DEV		"%p[%lu,%d:%d]"
+
+
+#define vxd_path(p)						\
+	({ static char _buffer[PATH_MAX];			\
+	   d_path(p, _buffer, sizeof(_buffer)); })
+
+#define vxd_cond_path(n)					\
+	((n) ? vxd_path(&(n)->path) : "<null>" )
+
+
+#ifdef	CONFIG_VSERVER_DEBUG
+
+extern unsigned int vx_debug_switch;
+extern unsigned int vx_debug_xid;
+extern unsigned int vx_debug_nid;
+extern unsigned int vx_debug_tag;
+extern unsigned int vx_debug_net;
+extern unsigned int vx_debug_limit;
+extern unsigned int vx_debug_cres;
+extern unsigned int vx_debug_dlim;
+extern unsigned int vx_debug_quota;
+extern unsigned int vx_debug_cvirt;
+extern unsigned int vx_debug_space;
+extern unsigned int vx_debug_misc;
+
+
+#define VX_LOGLEVEL	"vxD: "
+#define VX_PROC_FMT	"%p: "
+#define VX_PROCESS	current
+
+#define vxdprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_LOGLEVEL VX_PROC_FMT f "\n",	\
+				VX_PROCESS , ##x);		\
+	} while (0)
+
+#define vxlprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_LOGLEVEL f " @%s:%d\n", x);	\
+	} while (0)
+
+#define vxfprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_LOGLEVEL f " %s@%s:%d\n", x); \
+	} while (0)
+
+
+struct vx_info;
+
+void dump_vx_info(struct vx_info *, int);
+void dump_vx_info_inactive(int);
+
+#else	/* CONFIG_VSERVER_DEBUG */
+
+#define vx_debug_switch 0
+#define vx_debug_xid	0
+#define vx_debug_nid	0
+#define vx_debug_tag	0
+#define vx_debug_net	0
+#define vx_debug_limit	0
+#define vx_debug_cres	0
+#define vx_debug_dlim	0
+#define vx_debug_cvirt	0
+
+#define vxdprintk(x...) do { } while (0)
+#define vxlprintk(x...) do { } while (0)
+#define vxfprintk(x...) do { } while (0)
+
+#endif	/* CONFIG_VSERVER_DEBUG */
+
+
+#ifdef	CONFIG_VSERVER_WARN
+
+#define VX_WARNLEVEL	KERN_WARNING "vxW: "
+#define VX_WARN_TASK	"[»%s«,%u:#%u|%u|%u] "
+#define VX_WARN_XID	"[xid #%u] "
+#define VX_WARN_NID	"[nid #%u] "
+#define VX_WARN_TAG	"[tag #%u] "
+
+#define vxwprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_WARNLEVEL f "\n", ##x);	\
+	} while (0)
+
+#else	/* CONFIG_VSERVER_WARN */
+
+#define vxwprintk(x...) do { } while (0)
+
+#endif	/* CONFIG_VSERVER_WARN */
+
+#define vxwprintk_task(c, f, x...)				\
+	vxwprintk(c, VX_WARN_TASK f,				\
+		current->comm, current->pid,			\
+		current->xid, current->nid, current->tag, ##x)
+#define vxwprintk_xid(c, f, x...)				\
+	vxwprintk(c, VX_WARN_XID f, current->xid, x)
+#define vxwprintk_nid(c, f, x...)				\
+	vxwprintk(c, VX_WARN_NID f, current->nid, x)
+#define vxwprintk_tag(c, f, x...)				\
+	vxwprintk(c, VX_WARN_TAG f, current->tag, x)
+
+#ifdef	CONFIG_VSERVER_DEBUG
+#define vxd_assert_lock(l)	assert_spin_locked(l)
+#define vxd_assert(c, f, x...)	vxlprintk(!(c), \
+	"assertion [" f "] failed.", ##x, __FILE__, __LINE__)
+#else
+#define vxd_assert_lock(l)	do { } while (0)
+#define vxd_assert(c, f, x...)	do { } while (0)
+#endif
+
+
+#endif /* _VX_DEBUG_H */
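vxdprintk() above prints only when its condition is true and compiles away entirely without CONFIG_VSERVER_DEBUG; the condition is normally built from VXD_CBIT() and one of the vx_debug_* switches. Illustrative call (the message text is made up):

	vxdprintk(VXD_CBIT(xid, 1),
		"example: task %p[#%u] took the slow path",
		current, current->xid);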
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/device_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/device_cmd.h
--- linux-2.6.32.17/include/linux/vserver/device_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/device_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,44 @@
+#ifndef _VX_DEVICE_CMD_H
+#define _VX_DEVICE_CMD_H
+
+
+/*  device vserver commands */
+
+#define VCMD_set_mapping	VC_CMD(DEVICE, 1, 0)
+#define VCMD_unset_mapping	VC_CMD(DEVICE, 2, 0)
+
+struct	vcmd_set_mapping_v0 {
+	const char __user *device;
+	const char __user *target;
+	uint32_t flags;
+};
+
+
+#ifdef	__KERNEL__
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_set_mapping_v0_x32 {
+	compat_uptr_t device_ptr;
+	compat_uptr_t target_ptr;
+	uint32_t flags;
+};
+
+#endif	/* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_set_mapping(struct vx_info *, void __user *);
+extern int vc_unset_mapping(struct vx_info *, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_set_mapping_x32(struct vx_info *, void __user *);
+extern int vc_unset_mapping_x32(struct vx_info *, void __user *);
+
+#endif	/* CONFIG_COMPAT */
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_DEVICE_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/device_def.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/device_def.h
--- linux-2.6.32.17/include/linux/vserver/device_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/device_def.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,17 @@
+#ifndef _VX_DEVICE_DEF_H
+#define _VX_DEVICE_DEF_H
+
+#include <linux/types.h>
+
+struct vx_dmap_target {
+	dev_t target;
+	uint32_t flags;
+};
+
+struct _vx_device {
+#ifdef CONFIG_VSERVER_DEVICE
+	struct vx_dmap_target targets[2];
+#endif
+};
+
+#endif	/* _VX_DEVICE_DEF_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/device.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/device.h
--- linux-2.6.32.17/include/linux/vserver/device.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/device.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,15 @@
+#ifndef _VX_DEVICE_H
+#define _VX_DEVICE_H
+
+
+#define DATTR_CREATE	0x00000001
+#define DATTR_OPEN	0x00000002
+
+#define DATTR_REMAP	0x00000010
+
+#define DATTR_MASK	0x00000013
+
+
+#else	/* _VX_DEVICE_H */
+#warning duplicate inclusion
+#endif	/* _VX_DEVICE_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/dlimit_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/dlimit_cmd.h
--- linux-2.6.32.17/include/linux/vserver/dlimit_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/dlimit_cmd.h	2009-12-10 16:43:43.000000000 +0100
@@ -0,0 +1,109 @@
+#ifndef _VX_DLIMIT_CMD_H
+#define _VX_DLIMIT_CMD_H
+
+
+/*  dlimit vserver commands */
+
+#define VCMD_add_dlimit		VC_CMD(DLIMIT, 1, 0)
+#define VCMD_rem_dlimit		VC_CMD(DLIMIT, 2, 0)
+
+#define VCMD_set_dlimit		VC_CMD(DLIMIT, 5, 0)
+#define VCMD_get_dlimit		VC_CMD(DLIMIT, 6, 0)
+
+struct	vcmd_ctx_dlimit_base_v0 {
+	const char __user *name;
+	uint32_t flags;
+};
+
+struct	vcmd_ctx_dlimit_v0 {
+	const char __user *name;
+	uint32_t space_used;			/* used space in kbytes */
+	uint32_t space_total;			/* maximum space in kbytes */
+	uint32_t inodes_used;			/* used inodes */
+	uint32_t inodes_total;			/* maximum inodes */
+	uint32_t reserved;			/* reserved for root in % */
+	uint32_t flags;
+};
+
+#define CDLIM_UNSET		((uint32_t)0UL)
+#define CDLIM_INFINITY		((uint32_t)~0UL)
+#define CDLIM_KEEP		((uint32_t)~1UL)
+
+#define DLIME_UNIT	0
+#define DLIME_KILO	1
+#define DLIME_MEGA	2
+#define DLIME_GIGA	3
+
+#define DLIMF_SHIFT	0x10
+
+#define DLIMS_USED	0
+#define DLIMS_TOTAL	2
+
+static inline
+uint64_t dlimit_space_32to64(uint32_t val, uint32_t flags, int shift)
+{
+	int exp = (flags & DLIMF_SHIFT) ?
+		(flags >> shift) & DLIME_GIGA : DLIME_KILO;
+	return ((uint64_t)val) << (10 * exp);
+}
+
+static inline
+uint32_t dlimit_space_64to32(uint64_t val, uint32_t *flags, int shift)
+{
+	int exp = 0;
+
+	if (*flags & DLIMF_SHIFT) {
+		while (val > (1LL << 32) && (exp < 3)) {
+			val >>= 10;
+			exp++;
+		}
+		*flags &= ~(DLIME_GIGA << shift);
+		*flags |= exp << shift;
+	} else
+		val >>= 10;
+	return val;
+}
+
+#ifdef	__KERNEL__
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_ctx_dlimit_base_v0_x32 {
+	compat_uptr_t name_ptr;
+	uint32_t flags;
+};
+
+struct	vcmd_ctx_dlimit_v0_x32 {
+	compat_uptr_t name_ptr;
+	uint32_t space_used;			/* used space in kbytes */
+	uint32_t space_total;			/* maximum space in kbytes */
+	uint32_t inodes_used;			/* used inodes */
+	uint32_t inodes_total;			/* maximum inodes */
+	uint32_t reserved;			/* reserved for root in % */
+	uint32_t flags;
+};
+
+#endif	/* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_add_dlimit(uint32_t, void __user *);
+extern int vc_rem_dlimit(uint32_t, void __user *);
+
+extern int vc_set_dlimit(uint32_t, void __user *);
+extern int vc_get_dlimit(uint32_t, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_add_dlimit_x32(uint32_t, void __user *);
+extern int vc_rem_dlimit_x32(uint32_t, void __user *);
+
+extern int vc_set_dlimit_x32(uint32_t, void __user *);
+extern int vc_get_dlimit_x32(uint32_t, void __user *);
+
+#endif	/* CONFIG_COMPAT */
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_DLIMIT_CMD_H */
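dlimit_space_64to32()/dlimit_space_32to64() above pack a 64-bit byte count into the 32-bit interface field: without DLIMF_SHIFT the value is plain KiB, with it the value is shifted down until it fits and the unit exponent (DLIME_*) is recorded in the flags at the given shift. Worked round trip (values chosen for illustration):

	uint32_t flags = DLIMF_SHIFT, packed;
	uint64_t bytes = 5ULL << 40;		/* 5 TiB */

	packed = dlimit_space_64to32(bytes, &flags, DLIMS_TOTAL);
	/* packed == 5242880 with DLIME_MEGA stored in flags;
	   dlimit_space_32to64(packed, flags, DLIMS_TOTAL) returns 5 TiB again */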
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/dlimit.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/dlimit.h
--- linux-2.6.32.17/include/linux/vserver/dlimit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/dlimit.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,54 @@
+#ifndef _VX_DLIMIT_H
+#define _VX_DLIMIT_H
+
+#include "switch.h"
+
+
+#ifdef	__KERNEL__
+
+/*      keep in sync with CDLIM_INFINITY	*/
+
+#define DLIM_INFINITY		(~0ULL)
+
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+
+struct super_block;
+
+struct dl_info {
+	struct hlist_node dl_hlist;		/* linked list of contexts */
+	struct rcu_head dl_rcu;			/* the rcu head */
+	tag_t dl_tag;				/* context tag */
+	atomic_t dl_usecnt;			/* usage count */
+	atomic_t dl_refcnt;			/* reference count */
+
+	struct super_block *dl_sb;		/* associated superblock */
+
+	spinlock_t dl_lock;			/* protect the values */
+
+	unsigned long long dl_space_used;	/* used space in bytes */
+	unsigned long long dl_space_total;	/* maximum space in bytes */
+	unsigned long dl_inodes_used;		/* used inodes */
+	unsigned long dl_inodes_total;		/* maximum inodes */
+
+	unsigned int dl_nrlmult;		/* non root limit mult */
+};
+
+struct rcu_head;
+
+extern void rcu_free_dl_info(struct rcu_head *);
+extern void unhash_dl_info(struct dl_info *);
+
+extern struct dl_info *locate_dl_info(struct super_block *, tag_t);
+
+
+struct kstatfs;
+
+extern void vx_vsi_statfs(struct super_block *, struct kstatfs *);
+
+typedef uint64_t dlsize_t;
+
+#endif	/* __KERNEL__ */
+#else	/* _VX_DLIMIT_H */
+#warning duplicate inclusion
+#endif	/* _VX_DLIMIT_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/global.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/global.h
--- linux-2.6.32.17/include/linux/vserver/global.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/global.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,19 @@
+#ifndef _VX_GLOBAL_H
+#define _VX_GLOBAL_H
+
+
+extern atomic_t vx_global_ctotal;
+extern atomic_t vx_global_cactive;
+
+extern atomic_t nx_global_ctotal;
+extern atomic_t nx_global_cactive;
+
+extern atomic_t vs_global_nsproxy;
+extern atomic_t vs_global_fs;
+extern atomic_t vs_global_mnt_ns;
+extern atomic_t vs_global_uts_ns;
+extern atomic_t vs_global_user_ns;
+extern atomic_t vs_global_pid_ns;
+
+
+#endif /* _VX_GLOBAL_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/history.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/history.h
--- linux-2.6.32.17/include/linux/vserver/history.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/history.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,197 @@
+#ifndef _VX_HISTORY_H
+#define _VX_HISTORY_H
+
+
+enum {
+	VXH_UNUSED = 0,
+	VXH_THROW_OOPS = 1,
+
+	VXH_GET_VX_INFO,
+	VXH_PUT_VX_INFO,
+	VXH_INIT_VX_INFO,
+	VXH_SET_VX_INFO,
+	VXH_CLR_VX_INFO,
+	VXH_CLAIM_VX_INFO,
+	VXH_RELEASE_VX_INFO,
+	VXH_ALLOC_VX_INFO,
+	VXH_DEALLOC_VX_INFO,
+	VXH_HASH_VX_INFO,
+	VXH_UNHASH_VX_INFO,
+	VXH_LOC_VX_INFO,
+	VXH_LOOKUP_VX_INFO,
+	VXH_CREATE_VX_INFO,
+};
+
+struct _vxhe_vxi {
+	struct vx_info *ptr;
+	unsigned xid;
+	unsigned usecnt;
+	unsigned tasks;
+};
+
+struct _vxhe_set_clr {
+	void *data;
+};
+
+struct _vxhe_loc_lookup {
+	unsigned arg;
+};
+
+struct _vx_hist_entry {
+	void *loc;
+	unsigned short seq;
+	unsigned short type;
+	struct _vxhe_vxi vxi;
+	union {
+		struct _vxhe_set_clr sc;
+		struct _vxhe_loc_lookup ll;
+	};
+};
+
+#ifdef	CONFIG_VSERVER_HISTORY
+
+extern unsigned volatile int vxh_active;
+
+struct _vx_hist_entry *vxh_advance(void *loc);
+
+
+static inline
+void	__vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
+{
+	entry->vxi.ptr = vxi;
+	if (vxi) {
+		entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt);
+		entry->vxi.tasks = atomic_read(&vxi->vx_tasks);
+		entry->vxi.xid = vxi->vx_id;
+	}
+}
+
+
+#define	__HERE__ current_text_addr()
+
+#define __VXH_BODY(__type, __data, __here)	\
+	struct _vx_hist_entry *entry;		\
+						\
+	preempt_disable();			\
+	entry = vxh_advance(__here);		\
+	__data;					\
+	entry->type = __type;			\
+	preempt_enable();
+
+
+	/* pass vxi only */
+
+#define __VXH_SMPL				\
+	__vxh_copy_vxi(entry, vxi)
+
+static inline
+void	__vxh_smpl(struct vx_info *vxi, int __type, void *__here)
+{
+	__VXH_BODY(__type, __VXH_SMPL, __here)
+}
+
+	/* pass vxi and data (void *) */
+
+#define __VXH_DATA				\
+	__vxh_copy_vxi(entry, vxi);		\
+	entry->sc.data = data
+
+static inline
+void	__vxh_data(struct vx_info *vxi, void *data,
+			int __type, void *__here)
+{
+	__VXH_BODY(__type, __VXH_DATA, __here)
+}
+
+	/* pass vxi and arg (long) */
+
+#define __VXH_LONG				\
+	__vxh_copy_vxi(entry, vxi);		\
+	entry->ll.arg = arg
+
+static inline
+void	__vxh_long(struct vx_info *vxi, long arg,
+			int __type, void *__here)
+{
+	__VXH_BODY(__type, __VXH_LONG, __here)
+}
+
+
+static inline
+void	__vxh_throw_oops(void *__here)
+{
+	__VXH_BODY(VXH_THROW_OOPS, {}, __here);
+	/* prevent further acquisition */
+	vxh_active = 0;
+}
+
+
+#define vxh_throw_oops()	__vxh_throw_oops(__HERE__);
+
+#define __vxh_get_vx_info(v, h)	__vxh_smpl(v, VXH_GET_VX_INFO, h);
+#define __vxh_put_vx_info(v, h)	__vxh_smpl(v, VXH_PUT_VX_INFO, h);
+
+#define __vxh_init_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_INIT_VX_INFO, h);
+#define __vxh_set_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_SET_VX_INFO, h);
+#define __vxh_clr_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_CLR_VX_INFO, h);
+
+#define __vxh_claim_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_CLAIM_VX_INFO, h);
+#define __vxh_release_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_RELEASE_VX_INFO, h);
+
+#define vxh_alloc_vx_info(v) \
+	__vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
+#define vxh_dealloc_vx_info(v) \
+	__vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
+
+#define vxh_hash_vx_info(v) \
+	__vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
+#define vxh_unhash_vx_info(v) \
+	__vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
+
+#define vxh_loc_vx_info(v, l) \
+	__vxh_long(v, l, VXH_LOC_VX_INFO, __HERE__);
+#define vxh_lookup_vx_info(v, l) \
+	__vxh_long(v, l, VXH_LOOKUP_VX_INFO, __HERE__);
+#define vxh_create_vx_info(v, l) \
+	__vxh_long(v, l, VXH_CREATE_VX_INFO, __HERE__);
+
+extern void vxh_dump_history(void);
+
+
+#else  /* CONFIG_VSERVER_HISTORY */
+
+#define	__HERE__	0
+
+#define vxh_throw_oops()		do { } while (0)
+
+#define __vxh_get_vx_info(v, h)		do { } while (0)
+#define __vxh_put_vx_info(v, h)		do { } while (0)
+
+#define __vxh_init_vx_info(v, d, h)	do { } while (0)
+#define __vxh_set_vx_info(v, d, h)	do { } while (0)
+#define __vxh_clr_vx_info(v, d, h)	do { } while (0)
+
+#define __vxh_claim_vx_info(v, d, h)	do { } while (0)
+#define __vxh_release_vx_info(v, d, h)	do { } while (0)
+
+#define vxh_alloc_vx_info(v)		do { } while (0)
+#define vxh_dealloc_vx_info(v)		do { } while (0)
+
+#define vxh_hash_vx_info(v)		do { } while (0)
+#define vxh_unhash_vx_info(v)		do { } while (0)
+
+#define vxh_loc_vx_info(v, l)		do { } while (0)
+#define vxh_lookup_vx_info(v, l)	do { } while (0)
+#define vxh_create_vx_info(v, l)	do { } while (0)
+
+#define vxh_dump_history()		do { } while (0)
+
+
+#endif /* CONFIG_VSERVER_HISTORY */
+
+#endif /* _VX_HISTORY_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/inode_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/inode_cmd.h
--- linux-2.6.32.17/include/linux/vserver/inode_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/inode_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,59 @@
+#ifndef _VX_INODE_CMD_H
+#define _VX_INODE_CMD_H
+
+
+/*  inode vserver commands */
+
+#define VCMD_get_iattr		VC_CMD(INODE, 1, 1)
+#define VCMD_set_iattr		VC_CMD(INODE, 2, 1)
+
+#define VCMD_fget_iattr		VC_CMD(INODE, 3, 0)
+#define VCMD_fset_iattr		VC_CMD(INODE, 4, 0)
+
+struct	vcmd_ctx_iattr_v1 {
+	const char __user *name;
+	uint32_t tag;
+	uint32_t flags;
+	uint32_t mask;
+};
+
+struct	vcmd_ctx_fiattr_v0 {
+	uint32_t tag;
+	uint32_t flags;
+	uint32_t mask;
+};
+
+
+#ifdef	__KERNEL__
+
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_ctx_iattr_v1_x32 {
+	compat_uptr_t name_ptr;
+	uint32_t tag;
+	uint32_t flags;
+	uint32_t mask;
+};
+
+#endif	/* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_get_iattr(void __user *);
+extern int vc_set_iattr(void __user *);
+
+extern int vc_fget_iattr(uint32_t, void __user *);
+extern int vc_fset_iattr(uint32_t, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_get_iattr_x32(void __user *);
+extern int vc_set_iattr_x32(void __user *);
+
+#endif	/* CONFIG_COMPAT */
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_INODE_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/inode.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/inode.h
--- linux-2.6.32.17/include/linux/vserver/inode.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/inode.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,39 @@
+#ifndef _VX_INODE_H
+#define _VX_INODE_H
+
+
+#define IATTR_TAG	0x01000000
+
+#define IATTR_ADMIN	0x00000001
+#define IATTR_WATCH	0x00000002
+#define IATTR_HIDE	0x00000004
+#define IATTR_FLAGS	0x00000007
+
+#define IATTR_BARRIER	0x00010000
+#define IATTR_IXUNLINK	0x00020000
+#define IATTR_IMMUTABLE 0x00040000
+#define IATTR_COW	0x00080000
+
+#ifdef	__KERNEL__
+
+
+#ifdef	CONFIG_VSERVER_PROC_SECURE
+#define IATTR_PROC_DEFAULT	( IATTR_ADMIN | IATTR_HIDE )
+#define IATTR_PROC_SYMLINK	( IATTR_ADMIN )
+#else
+#define IATTR_PROC_DEFAULT	( IATTR_ADMIN )
+#define IATTR_PROC_SYMLINK	( IATTR_ADMIN )
+#endif
+
+#define vx_hide_check(c, m)	(((m) & IATTR_HIDE) ? vx_check(c, m) : 1)
+
+#endif	/* __KERNEL__ */
+
+/* inode ioctls */
+
+#define FIOC_GETXFLG	_IOR('x', 5, long)
+#define FIOC_SETXFLG	_IOW('x', 6, long)
+
+#else	/* _VX_INODE_H */
+#warning duplicate inclusion
+#endif	/* _VX_INODE_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/Kbuild linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/Kbuild
--- linux-2.6.32.17/include/linux/vserver/Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/Kbuild	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,8 @@
+
+unifdef-y += context_cmd.h network_cmd.h space_cmd.h \
+	cacct_cmd.h cvirt_cmd.h limit_cmd.h dlimit_cmd.h \
+	inode_cmd.h tag_cmd.h sched_cmd.h signal_cmd.h \
+	debug_cmd.h device_cmd.h
+
+unifdef-y += switch.h network.h monitor.h inode.h device.h
+
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/limit_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit_cmd.h
--- linux-2.6.32.17/include/linux/vserver/limit_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,71 @@
+#ifndef _VX_LIMIT_CMD_H
+#define _VX_LIMIT_CMD_H
+
+
+/*  rlimit vserver commands */
+
+#define VCMD_get_rlimit		VC_CMD(RLIMIT, 1, 0)
+#define VCMD_set_rlimit		VC_CMD(RLIMIT, 2, 0)
+#define VCMD_get_rlimit_mask	VC_CMD(RLIMIT, 3, 0)
+#define VCMD_reset_hits		VC_CMD(RLIMIT, 7, 0)
+#define VCMD_reset_minmax	VC_CMD(RLIMIT, 9, 0)
+
+struct	vcmd_ctx_rlimit_v0 {
+	uint32_t id;
+	uint64_t minimum;
+	uint64_t softlimit;
+	uint64_t maximum;
+};
+
+struct	vcmd_ctx_rlimit_mask_v0 {
+	uint32_t minimum;
+	uint32_t softlimit;
+	uint32_t maximum;
+};
+
+#define VCMD_rlimit_stat	VC_CMD(VSTAT, 1, 0)
+
+struct	vcmd_rlimit_stat_v0 {
+	uint32_t id;
+	uint32_t hits;
+	uint64_t value;
+	uint64_t minimum;
+	uint64_t maximum;
+};
+
+#define CRLIM_UNSET		(0ULL)
+#define CRLIM_INFINITY		(~0ULL)
+#define CRLIM_KEEP		(~1ULL)
+
+#ifdef	__KERNEL__
+
+#ifdef	CONFIG_IA32_EMULATION
+
+struct	vcmd_ctx_rlimit_v0_x32 {
+	uint32_t id;
+	uint64_t minimum;
+	uint64_t softlimit;
+	uint64_t maximum;
+} __attribute__ ((packed));
+
+#endif	/* CONFIG_IA32_EMULATION */
+
+#include <linux/compiler.h>
+
+extern int vc_get_rlimit_mask(uint32_t, void __user *);
+extern int vc_get_rlimit(struct vx_info *, void __user *);
+extern int vc_set_rlimit(struct vx_info *, void __user *);
+extern int vc_reset_hits(struct vx_info *, void __user *);
+extern int vc_reset_minmax(struct vx_info *, void __user *);
+
+extern int vc_rlimit_stat(struct vx_info *, void __user *);
+
+#ifdef	CONFIG_IA32_EMULATION
+
+extern int vc_get_rlimit_x32(struct vx_info *, void __user *);
+extern int vc_set_rlimit_x32(struct vx_info *, void __user *);
+
+#endif	/* CONFIG_IA32_EMULATION */
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_LIMIT_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/limit_def.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit_def.h
--- linux-2.6.32.17/include/linux/vserver/limit_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit_def.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,47 @@
+#ifndef _VX_LIMIT_DEF_H
+#define _VX_LIMIT_DEF_H
+
+#include <asm/atomic.h>
+#include <asm/resource.h>
+
+#include "limit.h"
+
+
+struct _vx_res_limit {
+	rlim_t soft;		/* Context soft limit */
+	rlim_t hard;		/* Context hard limit */
+
+	rlim_atomic_t rcur;	/* Current value */
+	rlim_t rmin;		/* Context minimum */
+	rlim_t rmax;		/* Context maximum */
+
+	atomic_t lhit;		/* Limit hits */
+};
+
+/* context sub struct */
+
+struct _vx_limit {
+	struct _vx_res_limit res[NUM_LIMITS];
+};
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_limit(struct _vx_limit *limit)
+{
+	int i;
+
+	printk("\t_vx_limit:");
+	for (i = 0; i < NUM_LIMITS; i++) {
+		printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n",
+			i, (unsigned long)__rlim_get(limit, i),
+			(unsigned long)__rlim_rmin(limit, i),
+			(unsigned long)__rlim_rmax(limit, i),
+			(long)__rlim_soft(limit, i),
+			(long)__rlim_hard(limit, i),
+			atomic_read(&__rlim_lhit(limit, i)));
+	}
+}
+
+#endif
+
+#endif	/* _VX_LIMIT_DEF_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/limit.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit.h
--- linux-2.6.32.17/include/linux/vserver/limit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit.h	2010-02-05 00:34:02.000000000 +0100
@@ -0,0 +1,71 @@
+#ifndef _VX_LIMIT_H
+#define _VX_LIMIT_H
+
+#define VLIMIT_NSOCK	16
+#define VLIMIT_OPENFD	17
+#define VLIMIT_ANON	18
+#define VLIMIT_SHMEM	19
+#define VLIMIT_SEMARY	20
+#define VLIMIT_NSEMS	21
+#define VLIMIT_DENTRY	22
+#define VLIMIT_MAPPED	23
+
+
+#ifdef	__KERNEL__
+
+#define	VLIM_NOCHECK	((1L << VLIMIT_DENTRY) | (1L << RLIMIT_RSS))
+
+/*	keep in sync with CRLIM_INFINITY */
+
+#define	VLIM_INFINITY	(~0ULL)
+
+#include <asm/atomic.h>
+#include <asm/resource.h>
+
+#ifndef RLIM_INFINITY
+#warning RLIM_INFINITY is undefined
+#endif
+
+#define __rlim_val(l, r, v)	((l)->res[r].v)
+
+#define __rlim_soft(l, r)	__rlim_val(l, r, soft)
+#define __rlim_hard(l, r)	__rlim_val(l, r, hard)
+
+#define __rlim_rcur(l, r)	__rlim_val(l, r, rcur)
+#define __rlim_rmin(l, r)	__rlim_val(l, r, rmin)
+#define __rlim_rmax(l, r)	__rlim_val(l, r, rmax)
+
+#define __rlim_lhit(l, r)	__rlim_val(l, r, lhit)
+#define __rlim_hit(l, r)	atomic_inc(&__rlim_lhit(l, r))
+
+typedef atomic_long_t rlim_atomic_t;
+typedef unsigned long rlim_t;
+
+#define __rlim_get(l, r)	atomic_long_read(&__rlim_rcur(l, r))
+#define __rlim_set(l, r, v)	atomic_long_set(&__rlim_rcur(l, r), v)
+#define __rlim_inc(l, r)	atomic_long_inc(&__rlim_rcur(l, r))
+#define __rlim_dec(l, r)	atomic_long_dec(&__rlim_rcur(l, r))
+#define __rlim_add(l, r, v)	atomic_long_add(v, &__rlim_rcur(l, r))
+#define __rlim_sub(l, r, v)	atomic_long_sub(v, &__rlim_rcur(l, r))
+
+
+#if	(RLIM_INFINITY == VLIM_INFINITY)
+#define	VX_VLIM(r) ((long long)(long)(r))
+#define	VX_RLIM(v) ((rlim_t)(v))
+#else
+#define	VX_VLIM(r) (((r) == RLIM_INFINITY) \
+		? VLIM_INFINITY : (long long)(r))
+#define	VX_RLIM(v) (((v) == VLIM_INFINITY) \
+		? RLIM_INFINITY : (rlim_t)(v))
+#endif
+
+struct sysinfo;
+
+void vx_vsi_meminfo(struct sysinfo *);
+void vx_vsi_swapinfo(struct sysinfo *);
+long vx_vsi_cached(struct sysinfo *);
+
+#define NUM_LIMITS	24
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_LIMIT_H */
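The VX_VLIM()/VX_RLIM() pair above translates between the kernel's rlim_t and the 64-bit limit values carried by the vserver commands; only on architectures where rlim_t is narrower than 64 bit does the infinity value need remapping. A minimal userspace sketch of that #else branch follows, using made-up 32-bit stand-ins (rlim32_t, RLIM32_INFINITY) rather than the real kernel types:

#include <stdio.h>
#include <stdint.h>

/* 32-bit stand-ins, illustrative only -- not the kernel definitions */
typedef uint32_t rlim32_t;
#define RLIM32_INFINITY	((rlim32_t)~0UL)
#define VLIM_INFINITY	(~0ULL)

/* mirrors the #else branch of VX_VLIM()/VX_RLIM() above */
#define VX_VLIM(r) (((r) == RLIM32_INFINITY) ? VLIM_INFINITY : (long long)(r))
#define VX_RLIM(v) (((v) == VLIM_INFINITY) ? RLIM32_INFINITY : (rlim32_t)(v))

int main(void)
{
	rlim32_t soft = 1024;

	/* finite values pass through unchanged, infinity is remapped */
	printf("%llx\n", (unsigned long long)VX_VLIM(soft));		/* 400 */
	printf("%llx\n", (unsigned long long)VX_VLIM(RLIM32_INFINITY));	/* ffffffffffffffff */
	printf("%lx\n",  (unsigned long)VX_RLIM(VLIM_INFINITY));	/* ffffffff */
	return 0;
}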
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/limit_int.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit_int.h
--- linux-2.6.32.17/include/linux/vserver/limit_int.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/limit_int.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,198 @@
+#ifndef _VX_LIMIT_INT_H
+#define _VX_LIMIT_INT_H
+
+#include "context.h"
+
+#ifdef	__KERNEL__
+
+#define VXD_RCRES_COND(r)	VXD_CBIT(cres, r)
+#define VXD_RLIMIT_COND(r)	VXD_CBIT(limit, r)
+
+extern const char *vlimit_name[NUM_LIMITS];
+
+static inline void __vx_acc_cres(struct vx_info *vxi,
+	int res, int dir, void *_data, char *_file, int _line)
+{
+	if (VXD_RCRES_COND(res))
+		vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)",
+			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+			(dir > 0) ? "++" : "--", _data, _file, _line);
+	if (!vxi)
+		return;
+
+	if (dir > 0)
+		__rlim_inc(&vxi->limit, res);
+	else
+		__rlim_dec(&vxi->limit, res);
+}
+
+static inline void __vx_add_cres(struct vx_info *vxi,
+	int res, int amount, void *_data, char *_file, int _line)
+{
+	if (VXD_RCRES_COND(res))
+		vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)",
+			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+			amount, _data, _file, _line);
+	if (amount == 0)
+		return;
+	if (!vxi)
+		return;
+	__rlim_add(&vxi->limit, res, amount);
+}
+
+static inline
+int __vx_cres_adjust_max(struct _vx_limit *limit, int res, rlim_t value)
+{
+	int cond = (value > __rlim_rmax(limit, res));
+
+	if (cond)
+		__rlim_rmax(limit, res) = value;
+	return cond;
+}
+
+static inline
+int __vx_cres_adjust_min(struct _vx_limit *limit, int res, rlim_t value)
+{
+	int cond = (value < __rlim_rmin(limit, res));
+
+	if (cond)
+		__rlim_rmin(limit, res) = value;
+	return cond;
+}
+
+static inline
+void __vx_cres_fixup(struct _vx_limit *limit, int res, rlim_t value)
+{
+	if (!__vx_cres_adjust_max(limit, res, value))
+		__vx_cres_adjust_min(limit, res, value);
+}
+
+
+/*	return values:
+	 +1 ... no limit hit
+	 -1 ... over soft limit
+	  0 ... over hard limit		*/
+
+static inline int __vx_cres_avail(struct vx_info *vxi,
+	int res, int num, char *_file, int _line)
+{
+	struct _vx_limit *limit;
+	rlim_t value;
+
+	if (VXD_RLIMIT_COND(res))
+		vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d",
+			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+			(vxi ? (long)__rlim_soft(&vxi->limit, res) : -1),
+			(vxi ? (long)__rlim_hard(&vxi->limit, res) : -1),
+			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+			num, _file, _line);
+	if (!vxi)
+		return 1;
+
+	limit = &vxi->limit;
+	value = __rlim_get(limit, res);
+
+	if (!__vx_cres_adjust_max(limit, res, value))
+		__vx_cres_adjust_min(limit, res, value);
+
+	if (num == 0)
+		return 1;
+
+	if (__rlim_soft(limit, res) == RLIM_INFINITY)
+		return -1;
+	if (value + num <= __rlim_soft(limit, res))
+		return -1;
+
+	if (__rlim_hard(limit, res) == RLIM_INFINITY)
+		return 1;
+	if (value + num <= __rlim_hard(limit, res))
+		return 1;
+
+	__rlim_hit(limit, res);
+	return 0;
+}
+
+
+static const int VLA_RSS[] = { RLIMIT_RSS, VLIMIT_ANON, VLIMIT_MAPPED, 0 };
+
+static inline
+rlim_t __vx_cres_array_sum(struct _vx_limit *limit, const int *array)
+{
+	rlim_t value, sum = 0;
+	int res;
+
+	while ((res = *array++)) {
+		value = __rlim_get(limit, res);
+		__vx_cres_fixup(limit, res, value);
+		sum += value;
+	}
+	return sum;
+}
+
+static inline
+rlim_t __vx_cres_array_fixup(struct _vx_limit *limit, const int *array)
+{
+	rlim_t value = __vx_cres_array_sum(limit, array + 1);
+	int res = *array;
+
+	if (value == __rlim_get(limit, res))
+		return value;
+
+	__rlim_set(limit, res, value);
+	/* now adjust min/max */
+	if (!__vx_cres_adjust_max(limit, res, value))
+		__vx_cres_adjust_min(limit, res, value);
+
+	return value;
+}
+
+static inline int __vx_cres_array_avail(struct vx_info *vxi,
+	const int *array, int num, char *_file, int _line)
+{
+	struct _vx_limit *limit;
+	rlim_t value = 0;
+	int res;
+
+	if (num == 0)
+		return 1;
+	if (!vxi)
+		return 1;
+
+	limit = &vxi->limit;
+	res = *array;
+	value = __vx_cres_array_sum(limit, array + 1);
+
+	__rlim_set(limit, res, value);
+	__vx_cres_fixup(limit, res, value);
+
+	return __vx_cres_avail(vxi, res, num, _file, _line);
+}
+
+
+static inline void vx_limit_fixup(struct _vx_limit *limit, int id)
+{
+	rlim_t value;
+	int res;
+
+	/* complex resources first */
+	if ((id < 0) || (id == RLIMIT_RSS))
+		__vx_cres_array_fixup(limit, VLA_RSS);
+
+	for (res = 0; res < NUM_LIMITS; res++) {
+		if ((id > 0) && (res != id))
+			continue;
+
+		value = __rlim_get(limit, res);
+		__vx_cres_fixup(limit, res, value);
+
+		/* not supposed to happen, maybe warn? */
+		if (__rlim_rmax(limit, res) > __rlim_hard(limit, res))
+			__rlim_rmax(limit, res) = __rlim_hard(limit, res);
+	}
+}
+
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_LIMIT_INT_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/monitor.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/monitor.h
--- linux-2.6.32.17/include/linux/vserver/monitor.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/monitor.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,96 @@
+#ifndef _VX_MONITOR_H
+#define _VX_MONITOR_H
+
+#include <linux/types.h>
+
+enum {
+	VXM_UNUSED = 0,
+
+	VXM_SYNC = 0x10,
+
+	VXM_UPDATE = 0x20,
+	VXM_UPDATE_1,
+	VXM_UPDATE_2,
+
+	VXM_RQINFO_1 = 0x24,
+	VXM_RQINFO_2,
+
+	VXM_ACTIVATE = 0x40,
+	VXM_DEACTIVATE,
+	VXM_IDLE,
+
+	VXM_HOLD = 0x44,
+	VXM_UNHOLD,
+
+	VXM_MIGRATE = 0x48,
+	VXM_RESCHED,
+
+	/* all other bits are flags */
+	VXM_SCHED = 0x80,
+};
+
+struct _vxm_update_1 {
+	uint32_t tokens_max;
+	uint32_t fill_rate;
+	uint32_t interval;
+};
+
+struct _vxm_update_2 {
+	uint32_t tokens_min;
+	uint32_t fill_rate;
+	uint32_t interval;
+};
+
+struct _vxm_rqinfo_1 {
+	uint16_t running;
+	uint16_t onhold;
+	uint16_t iowait;
+	uint16_t uintr;
+	uint32_t idle_tokens;
+};
+
+struct _vxm_rqinfo_2 {
+	uint32_t norm_time;
+	uint32_t idle_time;
+	uint32_t idle_skip;
+};
+
+struct _vxm_sched {
+	uint32_t tokens;
+	uint32_t norm_time;
+	uint32_t idle_time;
+};
+
+struct _vxm_task {
+	uint16_t pid;
+	uint16_t state;
+};
+
+struct _vxm_event {
+	uint32_t jif;
+	union {
+		uint32_t seq;
+		uint32_t sec;
+	};
+	union {
+		uint32_t tokens;
+		uint32_t nsec;
+		struct _vxm_task tsk;
+	};
+};
+
+struct _vx_mon_entry {
+	uint16_t type;
+	uint16_t xid;
+	union {
+		struct _vxm_event ev;
+		struct _vxm_sched sd;
+		struct _vxm_update_1 u1;
+		struct _vxm_update_2 u2;
+		struct _vxm_rqinfo_1 q1;
+		struct _vxm_rqinfo_2 q2;
+	};
+};
+
+
+#endif /* _VX_MONITOR_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/network_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/network_cmd.h
--- linux-2.6.32.17/include/linux/vserver/network_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/network_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,150 @@
+#ifndef _VX_NETWORK_CMD_H
+#define _VX_NETWORK_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_nid		VC_CMD(VINFO, 2, 0)
+
+#ifdef	__KERNEL__
+extern int vc_task_nid(uint32_t);
+
+#endif	/* __KERNEL__ */
+
+#define VCMD_nx_info		VC_CMD(VINFO, 6, 0)
+
+struct	vcmd_nx_info_v0 {
+	uint32_t nid;
+	/* more to come */
+};
+
+#ifdef	__KERNEL__
+extern int vc_nx_info(struct nx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#define VCMD_net_create_v0	VC_CMD(VNET, 1, 0)
+#define VCMD_net_create		VC_CMD(VNET, 1, 1)
+
+struct  vcmd_net_create {
+	uint64_t flagword;
+};
+
+#define VCMD_net_migrate	VC_CMD(NETMIG, 1, 0)
+
+#define VCMD_net_add		VC_CMD(NETALT, 1, 0)
+#define VCMD_net_remove		VC_CMD(NETALT, 2, 0)
+
+struct	vcmd_net_addr_v0 {
+	uint16_t type;
+	uint16_t count;
+	struct in_addr ip[4];
+	struct in_addr mask[4];
+};
+
+#define VCMD_net_add_ipv4	VC_CMD(NETALT, 1, 1)
+#define VCMD_net_remove_ipv4	VC_CMD(NETALT, 2, 1)
+
+struct	vcmd_net_addr_ipv4_v1 {
+	uint16_t type;
+	uint16_t flags;
+	struct in_addr ip;
+	struct in_addr mask;
+};
+
+#define VCMD_net_add_ipv6	VC_CMD(NETALT, 3, 1)
+#define VCMD_net_remove_ipv6	VC_CMD(NETALT, 4, 1)
+
+struct	vcmd_net_addr_ipv6_v1 {
+	uint16_t type;
+	uint16_t flags;
+	uint32_t prefix;
+	struct in6_addr ip;
+	struct in6_addr mask;
+};
+
+#define VCMD_add_match_ipv4	VC_CMD(NETALT, 5, 0)
+#define VCMD_get_match_ipv4	VC_CMD(NETALT, 6, 0)
+
+struct	vcmd_match_ipv4_v0 {
+	uint16_t type;
+	uint16_t flags;
+	uint16_t parent;
+	uint16_t prefix;
+	struct in_addr ip;
+	struct in_addr ip2;
+	struct in_addr mask;
+};
+
+#define VCMD_add_match_ipv6	VC_CMD(NETALT, 7, 0)
+#define VCMD_get_match_ipv6	VC_CMD(NETALT, 8, 0)
+
+struct	vcmd_match_ipv6_v0 {
+	uint16_t type;
+	uint16_t flags;
+	uint16_t parent;
+	uint16_t prefix;
+	struct in6_addr ip;
+	struct in6_addr ip2;
+	struct in6_addr mask;
+};
+
+
+#ifdef	__KERNEL__
+extern int vc_net_create(uint32_t, void __user *);
+extern int vc_net_migrate(struct nx_info *, void __user *);
+
+extern int vc_net_add(struct nx_info *, void __user *);
+extern int vc_net_remove(struct nx_info *, void __user *);
+
+extern int vc_net_add_ipv4(struct nx_info *, void __user *);
+extern int vc_net_remove_ipv4(struct nx_info *, void __user *);
+
+extern int vc_net_add_ipv6(struct nx_info *, void __user *);
+extern int vc_net_remove_ipv6(struct nx_info *, void __user *);
+
+extern int vc_add_match_ipv4(struct nx_info *, void __user *);
+extern int vc_get_match_ipv4(struct nx_info *, void __user *);
+
+extern int vc_add_match_ipv6(struct nx_info *, void __user *);
+extern int vc_get_match_ipv6(struct nx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+
+/* flag commands */
+
+#define VCMD_get_nflags		VC_CMD(FLAGS, 5, 0)
+#define VCMD_set_nflags		VC_CMD(FLAGS, 6, 0)
+
+struct	vcmd_net_flags_v0 {
+	uint64_t flagword;
+	uint64_t mask;
+};
+
+#ifdef	__KERNEL__
+extern int vc_get_nflags(struct nx_info *, void __user *);
+extern int vc_set_nflags(struct nx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+
+/* network caps commands */
+
+#define VCMD_get_ncaps		VC_CMD(FLAGS, 7, 0)
+#define VCMD_set_ncaps		VC_CMD(FLAGS, 8, 0)
+
+struct	vcmd_net_caps_v0 {
+	uint64_t ncaps;
+	uint64_t cmask;
+};
+
+#ifdef	__KERNEL__
+extern int vc_get_ncaps(struct nx_info *, void __user *);
+extern int vc_set_ncaps(struct nx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_NETWORK_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/network.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/network.h
--- linux-2.6.32.17/include/linux/vserver/network.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/network.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,146 @@
+#ifndef _VX_NETWORK_H
+#define _VX_NETWORK_H
+
+#include <linux/types.h>
+
+
+#define MAX_N_CONTEXT	65535	/* Arbitrary limit */
+
+
+/* network flags */
+
+#define NXF_INFO_PRIVATE	0x00000008
+
+#define NXF_SINGLE_IP		0x00000100
+#define NXF_LBACK_REMAP		0x00000200
+#define NXF_LBACK_ALLOW		0x00000400
+
+#define NXF_HIDE_NETIF		0x02000000
+#define NXF_HIDE_LBACK		0x04000000
+
+#define NXF_STATE_SETUP		(1ULL << 32)
+#define NXF_STATE_ADMIN		(1ULL << 34)
+
+#define NXF_SC_HELPER		(1ULL << 36)
+#define NXF_PERSISTENT		(1ULL << 38)
+
+#define NXF_ONE_TIME		(0x0005ULL << 32)
+
+
+#define	NXF_INIT_SET		(__nxf_init_set())
+
+static inline uint64_t __nxf_init_set(void) {
+	return	  NXF_STATE_ADMIN
+#ifdef	CONFIG_VSERVER_AUTO_LBACK
+		| NXF_LBACK_REMAP
+		| NXF_HIDE_LBACK
+#endif
+#ifdef	CONFIG_VSERVER_AUTO_SINGLE
+		| NXF_SINGLE_IP
+#endif
+		| NXF_HIDE_NETIF;
+}
+
+
+/* network caps */
+
+#define NXC_TUN_CREATE		0x00000001
+
+#define NXC_RAW_ICMP		0x00000100
+
+
+/* address types */
+
+#define NXA_TYPE_IPV4		0x0001
+#define NXA_TYPE_IPV6		0x0002
+
+#define NXA_TYPE_NONE		0x0000
+#define NXA_TYPE_ANY		0x00FF
+
+#define NXA_TYPE_ADDR		0x0010
+#define NXA_TYPE_MASK		0x0020
+#define NXA_TYPE_RANGE		0x0040
+
+#define NXA_MASK_ALL		(NXA_TYPE_ADDR | NXA_TYPE_MASK | NXA_TYPE_RANGE)
+
+#define NXA_MOD_BCAST		0x0100
+#define NXA_MOD_LBACK		0x0200
+
+#define NXA_LOOPBACK		0x1000
+
+#define NXA_MASK_BIND		(NXA_MASK_ALL | NXA_MOD_BCAST | NXA_MOD_LBACK)
+#define NXA_MASK_SHOW		(NXA_MASK_ALL | NXA_LOOPBACK)
+
+#ifdef	__KERNEL__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <asm/atomic.h>
+
+struct nx_addr_v4 {
+	struct nx_addr_v4 *next;
+	struct in_addr ip[2];
+	struct in_addr mask;
+	uint16_t type;
+	uint16_t flags;
+};
+
+struct nx_addr_v6 {
+	struct nx_addr_v6 *next;
+	struct in6_addr ip;
+	struct in6_addr mask;
+	uint32_t prefix;
+	uint16_t type;
+	uint16_t flags;
+};
+
+struct nx_info {
+	struct hlist_node nx_hlist;	/* linked list of nxinfos */
+	nid_t nx_id;			/* vnet id */
+	atomic_t nx_usecnt;		/* usage count */
+	atomic_t nx_tasks;		/* tasks count */
+	int nx_state;			/* context state */
+
+	uint64_t nx_flags;		/* network flag word */
+	uint64_t nx_ncaps;		/* network capabilities */
+
+	struct in_addr v4_lback;	/* Loopback address */
+	struct in_addr v4_bcast;	/* Broadcast address */
+	struct nx_addr_v4 v4;		/* First/Single ipv4 address */
+#ifdef	CONFIG_IPV6
+	struct nx_addr_v6 v6;		/* First/Single ipv6 address */
+#endif
+	char nx_name[65];		/* network context name */
+};
+
+
+/* status flags */
+
+#define NXS_HASHED      0x0001
+#define NXS_SHUTDOWN    0x0100
+#define NXS_RELEASED    0x8000
+
+extern struct nx_info *lookup_nx_info(int);
+
+extern int get_nid_list(int, unsigned int *, int);
+extern int nid_is_hashed(nid_t);
+
+extern int nx_migrate_task(struct task_struct *, struct nx_info *);
+
+extern long vs_net_change(struct nx_info *, unsigned int);
+
+struct sock;
+
+
+#define NX_IPV4(n)	((n)->v4.type != NXA_TYPE_NONE)
+#ifdef  CONFIG_IPV6
+#define NX_IPV6(n)	((n)->v6.type != NXA_TYPE_NONE)
+#else
+#define NX_IPV6(n)	(0)
+#endif
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_NETWORK_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/percpu.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/percpu.h
--- linux-2.6.32.17/include/linux/vserver/percpu.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/percpu.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,14 @@
+#ifndef _VX_PERCPU_H
+#define _VX_PERCPU_H
+
+#include "cvirt_def.h"
+#include "sched_def.h"
+
+struct	_vx_percpu {
+	struct _vx_cvirt_pc cvirt;
+	struct _vx_sched_pc sched;
+};
+
+#define	PERCPU_PERCTX	(sizeof(struct _vx_percpu))
+
+#endif	/* _VX_PERCPU_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/pid.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/pid.h
--- linux-2.6.32.17/include/linux/vserver/pid.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/pid.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,51 @@
+#ifndef _VSERVER_PID_H
+#define _VSERVER_PID_H
+
+/* pid faking stuff */
+
+#define vx_info_map_pid(v, p) \
+	__vx_info_map_pid((v), (p), __func__, __FILE__, __LINE__)
+#define vx_info_map_tgid(v,p)  vx_info_map_pid(v,p)
+#define vx_map_pid(p) vx_info_map_pid(current_vx_info(), p)
+#define vx_map_tgid(p) vx_map_pid(p)
+
+static inline int __vx_info_map_pid(struct vx_info *vxi, int pid,
+	const char *func, const char *file, int line)
+{
+	if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
+		vxfprintk(VXD_CBIT(cvirt, 2),
+			"vx_map_tgid: %p/%llx: %d -> %d",
+			vxi, (long long)vxi->vx_flags, pid,
+			(pid && pid == vxi->vx_initpid) ? 1 : pid,
+			func, file, line);
+		if (pid == 0)
+			return 0;
+		if (pid == vxi->vx_initpid)
+			return 1;
+	}
+	return pid;
+}
+
+#define vx_info_rmap_pid(v, p) \
+	__vx_info_rmap_pid((v), (p), __func__, __FILE__, __LINE__)
+#define vx_rmap_pid(p) vx_info_rmap_pid(current_vx_info(), p)
+#define vx_rmap_tgid(p) vx_rmap_pid(p)
+
+static inline int __vx_info_rmap_pid(struct vx_info *vxi, int pid,
+	const char *func, const char *file, int line)
+{
+	if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
+		vxfprintk(VXD_CBIT(cvirt, 2),
+			"vx_rmap_tgid: %p/%llx: %d -> %d",
+			vxi, (long long)vxi->vx_flags, pid,
+			(pid == 1) ? vxi->vx_initpid : pid,
+			func, file, line);
+		if ((pid == 1) && vxi->vx_initpid)
+			return vxi->vx_initpid;
+		if (pid == vxi->vx_initpid)
+			return ~0U;
+	}
+	return pid;
+}
+
+#endif
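The two helpers above virtualize pid 1 for contexts carrying VXF_INFO_INIT: the map direction rewrites the context's real init pid to 1, the rmap direction translates a guest-visible pid 1 back to vx_initpid and hides the real init pid. A simplified standalone model of that arithmetic (plain ints instead of struct vx_info, flag checks and debug output omitted; 4711 is an arbitrary example pid):

#include <stdio.h>

static int map_pid(int pid, int initpid)
{
	if (pid == 0)
		return 0;
	return (pid == initpid) ? 1 : pid;		/* real pid -> guest-visible pid */
}

static int rmap_pid(int pid, int initpid)
{
	if (pid == 1 && initpid)
		return initpid;				/* guest "init" -> real init pid */
	return (pid == initpid) ? (int)~0U : pid;	/* hide the real init pid */
}

int main(void)
{
	printf("%d %d\n", map_pid(4711, 4711), map_pid(4712, 4711));	/* 1 4712 */
	printf("%d %d\n", rmap_pid(1, 4711), rmap_pid(4712, 4711));	/* 4711 4712 */
	return 0;
}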
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/sched_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/sched_cmd.h
--- linux-2.6.32.17/include/linux/vserver/sched_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/sched_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,108 @@
+#ifndef _VX_SCHED_CMD_H
+#define _VX_SCHED_CMD_H
+
+
+/*  sched vserver commands */
+
+#define VCMD_set_sched_v2	VC_CMD(SCHED, 1, 2)
+#define VCMD_set_sched_v3	VC_CMD(SCHED, 1, 3)
+#define VCMD_set_sched_v4	VC_CMD(SCHED, 1, 4)
+
+struct	vcmd_set_sched_v2 {
+	int32_t fill_rate;
+	int32_t interval;
+	int32_t tokens;
+	int32_t tokens_min;
+	int32_t tokens_max;
+	uint64_t cpu_mask;
+};
+
+struct	vcmd_set_sched_v3 {
+	uint32_t set_mask;
+	int32_t fill_rate;
+	int32_t interval;
+	int32_t tokens;
+	int32_t tokens_min;
+	int32_t tokens_max;
+	int32_t priority_bias;
+};
+
+struct	vcmd_set_sched_v4 {
+	uint32_t set_mask;
+	int32_t fill_rate;
+	int32_t interval;
+	int32_t tokens;
+	int32_t tokens_min;
+	int32_t tokens_max;
+	int32_t prio_bias;
+	int32_t cpu_id;
+	int32_t bucket_id;
+};
+
+#define VCMD_set_sched		VC_CMD(SCHED, 1, 5)
+#define VCMD_get_sched		VC_CMD(SCHED, 2, 5)
+
+struct	vcmd_sched_v5 {
+	uint32_t mask;
+	int32_t cpu_id;
+	int32_t bucket_id;
+	int32_t fill_rate[2];
+	int32_t interval[2];
+	int32_t tokens;
+	int32_t tokens_min;
+	int32_t tokens_max;
+	int32_t prio_bias;
+};
+
+#define VXSM_FILL_RATE		0x0001
+#define VXSM_INTERVAL		0x0002
+#define VXSM_FILL_RATE2		0x0004
+#define VXSM_INTERVAL2		0x0008
+#define VXSM_TOKENS		0x0010
+#define VXSM_TOKENS_MIN		0x0020
+#define VXSM_TOKENS_MAX		0x0040
+#define VXSM_PRIO_BIAS		0x0100
+
+#define VXSM_IDLE_TIME		0x0200
+#define VXSM_FORCE		0x0400
+
+#define	VXSM_V3_MASK		0x0173
+#define	VXSM_SET_MASK		0x01FF
+
+#define VXSM_CPU_ID		0x1000
+#define VXSM_BUCKET_ID		0x2000
+
+#define VXSM_MSEC		0x4000
+
+#define SCHED_KEEP		(-2)	/* only for v2 */
+
+#ifdef	__KERNEL__
+
+#include <linux/compiler.h>
+
+extern int vc_set_sched_v2(struct vx_info *, void __user *);
+extern int vc_set_sched_v3(struct vx_info *, void __user *);
+extern int vc_set_sched_v4(struct vx_info *, void __user *);
+extern int vc_set_sched(struct vx_info *, void __user *);
+extern int vc_get_sched(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+#define VCMD_sched_info		VC_CMD(SCHED, 3, 0)
+
+struct	vcmd_sched_info {
+	int32_t cpu_id;
+	int32_t bucket_id;
+	uint64_t user_msec;
+	uint64_t sys_msec;
+	uint64_t hold_msec;
+	uint32_t token_usec;
+	int32_t vavavoom;
+};
+
+#ifdef	__KERNEL__
+
+extern int vc_sched_info(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_SCHED_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/sched_def.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/sched_def.h
--- linux-2.6.32.17/include/linux/vserver/sched_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/sched_def.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,68 @@
+#ifndef _VX_SCHED_DEF_H
+#define _VX_SCHED_DEF_H
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <asm/atomic.h>
+#include <asm/param.h>
+
+
+/* context sub struct */
+
+struct _vx_sched {
+	spinlock_t tokens_lock;		/* lock for token bucket */
+
+	int tokens;			/* number of CPU tokens */
+	int fill_rate[2];		/* Fill rate: add X tokens... */
+	int interval[2];		/* Divisor:   per Y jiffies   */
+	int tokens_min;			/* Limit:     minimum for unhold */
+	int tokens_max;			/* Limit:     no more than N tokens */
+
+	int prio_bias;			/* bias offset for priority */
+
+	unsigned update_mask;		/* which features should be updated */
+	cpumask_t update;		/* CPUs which should update */
+};
+
+struct _vx_sched_pc {
+	int tokens;			/* number of CPU tokens */
+	int flags;			/* bucket flags */
+
+	int fill_rate[2];		/* Fill rate: add X tokens... */
+	int interval[2];		/* Divisor:   per Y jiffies   */
+	int tokens_min;			/* Limit:     minimum for unhold */
+	int tokens_max;			/* Limit:     no more than N tokens */
+
+	int prio_bias;			/* bias offset for priority */
+	int vavavoom;			/* last calculated vavavoom */
+
+	unsigned long norm_time;	/* last time accounted */
+	unsigned long idle_time;	/* non linear time for fair sched */
+	unsigned long token_time;	/* token time for accounting */
+	unsigned long onhold;		/* jiffies when put on hold */
+
+	uint64_t user_ticks;		/* token tick events */
+	uint64_t sys_ticks;		/* token tick events */
+	uint64_t hold_ticks;		/* token ticks paused */
+};
+
+
+#define VXSF_ONHOLD	0x0001
+#define VXSF_IDLE_TIME	0x0100
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_sched(struct _vx_sched *sched)
+{
+	printk("\t_vx_sched:\n");
+	printk("\t tokens: %4d/%4d, %4d/%4d, %4d, %4d\n",
+		sched->fill_rate[0], sched->interval[0],
+		sched->fill_rate[1], sched->interval[1],
+		sched->tokens_min, sched->tokens_max);
+	printk("\t priority = %4d\n", sched->prio_bias);
+}
+
+#endif
+
+#endif	/* _VX_SCHED_DEF_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/sched.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/sched.h
--- linux-2.6.32.17/include/linux/vserver/sched.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/sched.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,26 @@
+#ifndef _VX_SCHED_H
+#define _VX_SCHED_H
+
+
+#ifdef	__KERNEL__
+
+struct timespec;
+
+void vx_vsi_uptime(struct timespec *, struct timespec *);
+
+
+struct vx_info;
+
+void vx_update_load(struct vx_info *);
+
+
+int vx_tokens_recalc(struct _vx_sched_pc *,
+	unsigned long *, unsigned long *, int [2]);
+
+void vx_update_sched_param(struct _vx_sched *sched,
+	struct _vx_sched_pc *sched_pc);
+
+#endif	/* __KERNEL__ */
+#else	/* _VX_SCHED_H */
+#warning duplicate inclusion
+#endif	/* _VX_SCHED_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/signal_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/signal_cmd.h
--- linux-2.6.32.17/include/linux/vserver/signal_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/signal_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,43 @@
+#ifndef _VX_SIGNAL_CMD_H
+#define _VX_SIGNAL_CMD_H
+
+
+/*  signalling vserver commands */
+
+#define VCMD_ctx_kill		VC_CMD(PROCTRL, 1, 0)
+#define VCMD_wait_exit		VC_CMD(EVENT, 99, 0)
+
+struct	vcmd_ctx_kill_v0 {
+	int32_t pid;
+	int32_t sig;
+};
+
+struct	vcmd_wait_exit_v0 {
+	int32_t reboot_cmd;
+	int32_t exit_code;
+};
+
+#ifdef	__KERNEL__
+
+extern int vc_ctx_kill(struct vx_info *, void __user *);
+extern int vc_wait_exit(struct vx_info *, void __user *);
+
+#endif	/* __KERNEL__ */
+
+/*  process alteration commands */
+
+#define VCMD_get_pflags		VC_CMD(PROCALT, 5, 0)
+#define VCMD_set_pflags		VC_CMD(PROCALT, 6, 0)
+
+struct	vcmd_pflags_v0 {
+	uint32_t flagword;
+	uint32_t mask;
+};
+
+#ifdef	__KERNEL__
+
+extern int vc_get_pflags(uint32_t pid, void __user *);
+extern int vc_set_pflags(uint32_t pid, void __user *);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_SIGNAL_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/signal.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/signal.h
--- linux-2.6.32.17/include/linux/vserver/signal.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/signal.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,14 @@
+#ifndef _VX_SIGNAL_H
+#define _VX_SIGNAL_H
+
+
+#ifdef	__KERNEL__
+
+struct vx_info;
+
+int vx_info_kill(struct vx_info *, int, int);
+
+#endif	/* __KERNEL__ */
+#else	/* _VX_SIGNAL_H */
+#warning duplicate inclusion
+#endif	/* _VX_SIGNAL_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/space_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/space_cmd.h
--- linux-2.6.32.17/include/linux/vserver/space_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/space_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,38 @@
+#ifndef _VX_SPACE_CMD_H
+#define _VX_SPACE_CMD_H
+
+
+#define VCMD_enter_space_v0	VC_CMD(PROCALT, 1, 0)
+#define VCMD_enter_space_v1	VC_CMD(PROCALT, 1, 1)
+#define VCMD_enter_space	VC_CMD(PROCALT, 1, 2)
+
+#define VCMD_set_space_v0	VC_CMD(PROCALT, 3, 0)
+#define VCMD_set_space_v1	VC_CMD(PROCALT, 3, 1)
+#define VCMD_set_space		VC_CMD(PROCALT, 3, 2)
+
+#define VCMD_get_space_mask_v0	VC_CMD(PROCALT, 4, 0)
+
+#define VCMD_get_space_mask	VC_CMD(VSPACE, 0, 1)
+#define VCMD_get_space_default	VC_CMD(VSPACE, 1, 0)
+
+
+struct	vcmd_space_mask_v1 {
+	uint64_t mask;
+};
+
+struct	vcmd_space_mask_v2 {
+	uint64_t mask;
+	uint32_t index;
+};
+
+
+#ifdef	__KERNEL__
+
+extern int vc_enter_space_v1(struct vx_info *, void __user *);
+extern int vc_set_space_v1(struct vx_info *, void __user *);
+extern int vc_enter_space(struct vx_info *, void __user *);
+extern int vc_set_space(struct vx_info *, void __user *);
+extern int vc_get_space_mask(void __user *, int);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_SPACE_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/space.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/space.h
--- linux-2.6.32.17/include/linux/vserver/space.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/space.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,12 @@
+#ifndef _VX_SPACE_H
+#define _VX_SPACE_H
+
+#include <linux/types.h>
+
+struct vx_info;
+
+int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index);
+
+#else	/* _VX_SPACE_H */
+#warning duplicate inclusion
+#endif	/* _VX_SPACE_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/switch.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/switch.h
--- linux-2.6.32.17/include/linux/vserver/switch.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/switch.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,98 @@
+#ifndef _VX_SWITCH_H
+#define _VX_SWITCH_H
+
+#include <linux/types.h>
+
+
+#define VC_CATEGORY(c)		(((c) >> 24) & 0x3F)
+#define VC_COMMAND(c)		(((c) >> 16) & 0xFF)
+#define VC_VERSION(c)		((c) & 0xFFF)
+
+#define VC_CMD(c, i, v)		((((VC_CAT_ ## c) & 0x3F) << 24) \
+				| (((i) & 0xFF) << 16) | ((v) & 0xFFF))
+
+/*
+
+  Syscall Matrix V2.8
+
+	 |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL|
+	 |STATS  |DESTROY|ALTER  |CHANGE |LIMIT  |TEST   | |       |       |
+	 |INFO   |SETUP  |       |MOVE   |       |       | |       |       |
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  SYSTEM |VERSION|VSETUP |VHOST  |       |       |       | |DEVICE |       |
+  HOST   |     00|     01|     02|     03|     04|     05| |     06|     07|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  CPU    |       |VPROC  |PROCALT|PROCMIG|PROCTRL|       | |SCHED. |       |
+  PROCESS|     08|     09|     10|     11|     12|     13| |     14|     15|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  MEMORY |       |       |       |       |MEMCTRL|       | |SWAP   |       |
+	 |     16|     17|     18|     19|     20|     21| |     22|     23|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  NETWORK|       |VNET   |NETALT |NETMIG |NETCTL |       | |SERIAL |       |
+	 |     24|     25|     26|     27|     28|     29| |     30|     31|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  DISK   |       |       |       |TAGMIG |DLIMIT |       | |INODE  |       |
+  VFS    |     32|     33|     34|     35|     36|     37| |     38|     39|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  OTHER  |VSTAT  |       |       |       |       |       | |VINFO  |       |
+	 |     40|     41|     42|     43|     44|     45| |     46|     47|
+  =======+=======+=======+=======+=======+=======+=======+ +=======+=======+
+  SPECIAL|EVENT  |       |       |       |FLAGS  |       | |VSPACE |       |
+	 |     48|     49|     50|     51|     52|     53| |     54|     55|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  SPECIAL|DEBUG  |       |       |       |RLIMIT |SYSCALL| |       |COMPAT |
+	 |     56|     57|     58|     59|     60|TEST 61| |     62|     63|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+
+*/
+
+#define VC_CAT_VERSION		0
+
+#define VC_CAT_VSETUP		1
+#define VC_CAT_VHOST		2
+
+#define VC_CAT_DEVICE		6
+
+#define VC_CAT_VPROC		9
+#define VC_CAT_PROCALT		10
+#define VC_CAT_PROCMIG		11
+#define VC_CAT_PROCTRL		12
+
+#define VC_CAT_SCHED		14
+#define VC_CAT_MEMCTRL		20
+
+#define VC_CAT_VNET		25
+#define VC_CAT_NETALT		26
+#define VC_CAT_NETMIG		27
+#define VC_CAT_NETCTRL		28
+
+#define VC_CAT_TAGMIG		35
+#define VC_CAT_DLIMIT		36
+#define VC_CAT_INODE		38
+
+#define VC_CAT_VSTAT		40
+#define VC_CAT_VINFO		46
+#define VC_CAT_EVENT		48
+
+#define VC_CAT_FLAGS		52
+#define VC_CAT_VSPACE		54
+#define VC_CAT_DEBUG		56
+#define VC_CAT_RLIMIT		60
+
+#define VC_CAT_SYSTEST		61
+#define VC_CAT_COMPAT		63
+
+/*  query version */
+
+#define VCMD_get_version	VC_CMD(VERSION, 0, 0)
+#define VCMD_get_vci		VC_CMD(VERSION, 1, 0)
+
+
+#ifdef	__KERNEL__
+
+#include <linux/errno.h>
+
+#endif	/* __KERNEL__ */
+
+#endif	/* _VX_SWITCH_H */
+
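The matrix above is encoded by VC_CMD(): 6 bits of category, 8 bits of command and 12 bits of version packed into one 32-bit command word. A small userspace sketch of the same bit layout, passing the category number directly instead of the VC_CAT_ ## c token paste used by the kernel macro:

#include <stdio.h>
#include <stdint.h>

/* same bit layout as VC_CMD()/VC_CATEGORY()/VC_COMMAND()/VC_VERSION() above */
#define VC_CATEGORY(c)	(((c) >> 24) & 0x3F)
#define VC_COMMAND(c)	(((c) >> 16) & 0xFF)
#define VC_VERSION(c)	((c) & 0xFFF)
#define VC_CMD(cat, i, v) \
	((((cat) & 0x3F) << 24) | (((i) & 0xFF) << 16) | ((v) & 0xFFF))

#define VC_CAT_PROCTRL	12

int main(void)
{
	uint32_t cmd = VC_CMD(VC_CAT_PROCTRL, 1, 0);	/* VCMD_ctx_kill */

	printf("cmd      = 0x%08x\n", cmd);		/* 0x0c010000 */
	printf("category = %u\n", VC_CATEGORY(cmd));	/* 12 (PROCTRL) */
	printf("command  = %u\n", VC_COMMAND(cmd));	/* 1 */
	printf("version  = %u\n", VC_VERSION(cmd));	/* 0 */
	return 0;
}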
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/tag_cmd.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/tag_cmd.h
--- linux-2.6.32.17/include/linux/vserver/tag_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/tag_cmd.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,22 @@
+#ifndef _VX_TAG_CMD_H
+#define _VX_TAG_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_tag		VC_CMD(VINFO, 3, 0)
+
+#ifdef	__KERNEL__
+extern int vc_task_tag(uint32_t);
+
+#endif	/* __KERNEL__ */
+
+/* context commands */
+
+#define VCMD_tag_migrate	VC_CMD(TAGMIG, 1, 0)
+
+#ifdef	__KERNEL__
+extern int vc_tag_migrate(uint32_t);
+
+#endif	/* __KERNEL__ */
+#endif	/* _VX_TAG_CMD_H */
diff -NurpP --minimal linux-2.6.32.17/include/linux/vserver/tag.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/tag.h
--- linux-2.6.32.17/include/linux/vserver/tag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vserver/tag.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,143 @@
+#ifndef _DX_TAG_H
+#define _DX_TAG_H
+
+#include <linux/types.h>
+
+
+#define DX_TAG(in)	(IS_TAGGED(in))
+
+
+#ifdef CONFIG_TAG_NFSD
+#define DX_TAG_NFSD	1
+#else
+#define DX_TAG_NFSD	0
+#endif
+
+
+#ifdef CONFIG_TAGGING_NONE
+
+#define MAX_UID		0xFFFFFFFF
+#define MAX_GID		0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	(0)
+
+#define TAGINO_UID(cond, uid, tag)	(uid)
+#define TAGINO_GID(cond, gid, tag)	(gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_GID16
+
+#define MAX_UID		0xFFFFFFFF
+#define MAX_GID		0x0000FFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? (((gid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	(uid)
+#define TAGINO_GID(cond, gid, tag)	\
+	((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_ID24
+
+#define MAX_UID		0x00FFFFFF
+#define MAX_GID		0x00FFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	\
+	((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag)	\
+	((cond) ? (((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_UID16
+
+#define MAX_UID		0x0000FFFF
+#define MAX_GID		0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? (((uid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	\
+	((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag)	(gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_INTERN
+
+#define MAX_UID		0xFFFFFFFF
+#define MAX_GID		0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? (tag) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	(uid)
+#define TAGINO_GID(cond, gid, tag)	(gid)
+
+#endif
+
+
+#ifndef CONFIG_TAGGING_NONE
+#define dx_current_fstag(sb)	\
+	((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0)
+#else
+#define dx_current_fstag(sb)	(0)
+#endif
+
+#ifndef CONFIG_TAGGING_INTERN
+#define TAGINO_TAG(cond, tag)	(0)
+#else
+#define TAGINO_TAG(cond, tag)	((cond) ? (tag) : 0)
+#endif
+
+#define INOTAG_UID(cond, uid, gid)	\
+	((cond) ? ((uid) & MAX_UID) : (uid))
+#define INOTAG_GID(cond, uid, gid)	\
+	((cond) ? ((gid) & MAX_GID) : (gid))
+
+
+static inline uid_t dx_map_uid(uid_t uid)
+{
+	if ((uid > MAX_UID) && (uid != -1))
+		uid = -2;
+	return (uid & MAX_UID);
+}
+
+static inline gid_t dx_map_gid(gid_t gid)
+{
+	if ((gid > MAX_GID) && (gid != -1))
+		gid = -2;
+	return (gid & MAX_GID);
+}
+
+struct peer_tag {
+	int32_t xid;
+	int32_t nid;
+};
+
+#define dx_notagcheck(sb) ((sb) && ((sb)->s_flags & MS_NOTAGCHECK))
+
+int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags,
+		 unsigned long *flags);
+
+#ifdef	CONFIG_PROPAGATE
+
+void __dx_propagate_tag(struct nameidata *nd, struct inode *inode);
+
+#define dx_propagate_tag(n, i)	__dx_propagate_tag(n, i)
+
+#else
+#define dx_propagate_tag(n, i)	do { } while (0)
+#endif
+
+#endif /* _DX_TAG_H */
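In the CONFIG_TAGGING_UID16 variant above the context tag lives in the upper 16 bits of the stored uid, which is why MAX_UID drops to 0x0000FFFF there. The standalone sketch below replays just that packing/unpacking arithmetic with local copies of the UID16 macros (values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* copies of the CONFIG_TAGGING_UID16 macros above */
#define MAX_UID		0x0000FFFF
#define INOTAG_TAG(cond, uid, gid, tag) \
	((cond) ? (((uid) >> 16) & 0xFFFF) : 0)
#define TAGINO_UID(cond, uid, tag) \
	((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid))
#define INOTAG_UID(cond, uid, gid) \
	((cond) ? ((uid) & MAX_UID) : (uid))

int main(void)
{
	uint32_t uid = 1000, tag = 42;

	/* fold the tag into the upper half of the uid */
	uint32_t tagged_uid = TAGINO_UID(1, uid, tag);
	printf("tagged uid = 0x%08x\n", tagged_uid);		/* 0x002a03e8 */

	/* split the tagged value back into tag and uid */
	printf("tag = %u\n", INOTAG_TAG(1, tagged_uid, 0, 0));	/* 42 */
	printf("uid = %u\n", INOTAG_UID(1, tagged_uid, 0));	/* 1000 */
	return 0;
}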
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_inet6.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_inet6.h
--- linux-2.6.32.17/include/linux/vs_inet6.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_inet6.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,246 @@
+#ifndef _VS_INET6_H
+#define _VS_INET6_H
+
+#include "vserver/base.h"
+#include "vserver/network.h"
+#include "vserver/debug.h"
+
+#include <net/ipv6.h>
+
+#define NXAV6(a)	&(a)->ip, &(a)->mask, (a)->prefix, (a)->type
+#define NXAV6_FMT	"[%pI6/%pI6/%d:%04x]"
+
+
+#ifdef	CONFIG_IPV6
+
+static inline
+int v6_addr_match(struct nx_addr_v6 *nxa,
+	const struct in6_addr *addr, uint16_t mask)
+{
+	int ret = 0;
+
+	switch (nxa->type & mask) {
+	case NXA_TYPE_MASK:
+		ret = ipv6_masked_addr_cmp(&nxa->ip, &nxa->mask, addr);
+		break;
+	case NXA_TYPE_ADDR:
+		ret = ipv6_addr_equal(&nxa->ip, addr);
+		break;
+	case NXA_TYPE_ANY:
+		ret = 1;
+		break;
+	}
+	vxdprintk(VXD_CBIT(net, 0),
+		"v6_addr_match(%p" NXAV6_FMT ",%pI6,%04x) = %d",
+		nxa, NXAV6(nxa), addr, mask, ret);
+	return ret;
+}
+
+static inline
+int v6_addr_in_nx_info(struct nx_info *nxi,
+	const struct in6_addr *addr, uint16_t mask)
+{
+	struct nx_addr_v6 *nxa;
+	int ret = 1;
+
+	if (!nxi)
+		goto out;
+	for (nxa = &nxi->v6; nxa; nxa = nxa->next)
+		if (v6_addr_match(nxa, addr, mask))
+			goto out;
+	ret = 0;
+out:
+	vxdprintk(VXD_CBIT(net, 0),
+		"v6_addr_in_nx_info(%p[#%u],%pI6,%04x) = %d",
+		nxi, nxi ? nxi->nx_id : 0, addr, mask, ret);
+	return ret;
+}
+
+static inline
+int v6_nx_addr_match(struct nx_addr_v6 *nxa, struct nx_addr_v6 *addr, uint16_t mask)
+{
+	/* FIXME: needs full range checks */
+	return v6_addr_match(nxa, &addr->ip, mask);
+}
+
+static inline
+int v6_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v6 *nxa, uint16_t mask)
+{
+	struct nx_addr_v6 *ptr;
+
+	for (ptr = &nxi->v6; ptr; ptr = ptr->next)
+		if (v6_nx_addr_match(ptr, nxa, mask))
+			return 1;
+	return 0;
+}
+
+
+/*
+ *	Check if a given address matches for a socket
+ *
+ *	nxi:		the socket's nx_info if any
+ *	addr:		address to be verified
+ */
+static inline
+int v6_sock_addr_match (
+	struct nx_info *nxi,
+	struct inet_sock *inet,
+	struct in6_addr *addr)
+{
+	struct sock *sk = &inet->sk;
+	struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+	if (!ipv6_addr_any(addr) &&
+		ipv6_addr_equal(saddr, addr))
+		return 1;
+	if (ipv6_addr_any(saddr))
+		return v6_addr_in_nx_info(nxi, addr, -1);
+	return 0;
+}
+
+/*
+ *	check if address is covered by socket
+ *
+ *	sk:	the socket to check against
+ *	addr:	the address in question (must be != 0)
+ */
+
+static inline
+int __v6_addr_match_socket(const struct sock *sk, struct nx_addr_v6 *nxa)
+{
+	struct nx_info *nxi = sk->sk_nx_info;
+	struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+	vxdprintk(VXD_CBIT(net, 5),
+		"__v6_addr_in_socket(%p," NXAV6_FMT ") %p:%pI6 %p;%lx",
+		sk, NXAV6(nxa), nxi, saddr, sk->sk_socket,
+		(sk->sk_socket?sk->sk_socket->flags:0));
+
+	if (!ipv6_addr_any(saddr)) {	/* direct address match */
+		return v6_addr_match(nxa, saddr, -1);
+	} else if (nxi) {		/* match against nx_info */
+		return v6_nx_addr_in_nx_info(nxi, nxa, -1);
+	} else {			/* unrestricted any socket */
+		return 1;
+	}
+}
+
+
+/* inet related checks and helpers */
+
+
+struct in_ifaddr;
+struct net_device;
+struct sock;
+
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/inet_timewait_sock.h>
+
+
+int dev_in_nx_info(struct net_device *, struct nx_info *);
+int v6_dev_in_nx_info(struct net_device *, struct nx_info *);
+int nx_v6_addr_conflict(struct nx_info *, struct nx_info *);
+
+
+
+static inline
+int v6_ifa_in_nx_info(struct inet6_ifaddr *ifa, struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (!ifa)
+		return 0;
+	return v6_addr_in_nx_info(nxi, &ifa->addr, -1);
+}
+
+static inline
+int nx_v6_ifa_visible(struct nx_info *nxi, struct inet6_ifaddr *ifa)
+{
+	vxdprintk(VXD_CBIT(net, 1), "nx_v6_ifa_visible(%p[#%u],%p) %d",
+		nxi, nxi ? nxi->nx_id : 0, ifa,
+		nxi ? v6_ifa_in_nx_info(ifa, nxi) : 0);
+
+	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+		return 1;
+	if (v6_ifa_in_nx_info(ifa, nxi))
+		return 1;
+	return 0;
+}
+
+
+struct nx_v6_sock_addr {
+	struct in6_addr saddr;	/* Address used for validation */
+	struct in6_addr baddr;	/* Address used for socket bind */
+};
+
+static inline
+int v6_map_sock_addr(struct inet_sock *inet, struct sockaddr_in6 *addr,
+	struct nx_v6_sock_addr *nsa)
+{
+	// struct sock *sk = &inet->sk;
+	// struct nx_info *nxi = sk->sk_nx_info;
+	struct in6_addr saddr = addr->sin6_addr;
+	struct in6_addr baddr = saddr;
+
+	nsa->saddr = saddr;
+	nsa->baddr = baddr;
+	return 0;
+}
+
+static inline
+void v6_set_sock_addr(struct inet_sock *inet, struct nx_v6_sock_addr *nsa)
+{
+	// struct sock *sk = &inet->sk;
+	// struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+	// *saddr = nsa->baddr;
+	// inet->saddr = nsa->baddr;
+}
+
+static inline
+int nx_info_has_v6(struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (NX_IPV6(nxi))
+		return 1;
+	return 0;
+}
+
+#else /* CONFIG_IPV6 */
+
+static inline
+int nx_v6_dev_visible(struct nx_info *n, struct net_device *d)
+{
+	return 1;
+}
+
+
+static inline
+int nx_v6_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
+{
+	return 1;
+}
+
+static inline
+int v6_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
+{
+	return 1;
+}
+
+static inline
+int nx_info_has_v6(struct nx_info *nxi)
+{
+	return 0;
+}
+
+#endif /* CONFIG_IPV6 */
+
+#define current_nx_info_has_v6() \
+	nx_info_has_v6(current_nx_info())
+
+#else
+#warning duplicate inclusion
+#endif
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_inet.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_inet.h
--- linux-2.6.32.17/include/linux/vs_inet.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_inet.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,342 @@
+#ifndef _VS_INET_H
+#define _VS_INET_H
+
+#include "vserver/base.h"
+#include "vserver/network.h"
+#include "vserver/debug.h"
+
+#define IPI_LOOPBACK	htonl(INADDR_LOOPBACK)
+
+#define NXAV4(a)	NIPQUAD((a)->ip[0]), NIPQUAD((a)->ip[1]), \
+			NIPQUAD((a)->mask), (a)->type
+#define NXAV4_FMT	"[" NIPQUAD_FMT "-" NIPQUAD_FMT "/" NIPQUAD_FMT ":%04x]"
+
+
+static inline
+int v4_addr_match(struct nx_addr_v4 *nxa, __be32 addr, uint16_t tmask)
+{
+	__be32 ip = nxa->ip[0].s_addr;
+	__be32 mask = nxa->mask.s_addr;
+	__be32 bcast = ip | ~mask;
+	int ret = 0;
+
+	switch (nxa->type & tmask) {
+	case NXA_TYPE_MASK:
+		ret = (ip == (addr & mask));
+		break;
+	case NXA_TYPE_ADDR:
+		ret = 3;
+		if (addr == ip)
+			break;
+		/* fall through to broadcast */
+	case NXA_MOD_BCAST:
+		ret = ((tmask & NXA_MOD_BCAST) && (addr == bcast));
+		break;
+	case NXA_TYPE_RANGE:
+		ret = ((nxa->ip[0].s_addr <= addr) &&
+			(nxa->ip[1].s_addr > addr));
+		break;
+	case NXA_TYPE_ANY:
+		ret = 2;
+		break;
+	}
+
+	vxdprintk(VXD_CBIT(net, 0),
+		"v4_addr_match(%p" NXAV4_FMT "," NIPQUAD_FMT ",%04x) = %d",
+		nxa, NXAV4(nxa), NIPQUAD(addr), tmask, ret);
+	return ret;
+}
+
+static inline
+int v4_addr_in_nx_info(struct nx_info *nxi, __be32 addr, uint16_t tmask)
+{
+	struct nx_addr_v4 *nxa;
+	int ret = 1;
+
+	if (!nxi)
+		goto out;
+
+	ret = 2;
+	/* allow 127.0.0.1 when remapping lback */
+	if ((tmask & NXA_LOOPBACK) &&
+		(addr == IPI_LOOPBACK) &&
+		nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+		goto out;
+	ret = 3;
+	/* check for lback address */
+	if ((tmask & NXA_MOD_LBACK) &&
+		(nxi->v4_lback.s_addr == addr))
+		goto out;
+	ret = 4;
+	/* check for broadcast address */
+	if ((tmask & NXA_MOD_BCAST) &&
+		(nxi->v4_bcast.s_addr == addr))
+		goto out;
+	ret = 5;
+	/* check for v4 addresses */
+	for (nxa = &nxi->v4; nxa; nxa = nxa->next)
+		if (v4_addr_match(nxa, addr, tmask))
+			goto out;
+	ret = 0;
+out:
+	vxdprintk(VXD_CBIT(net, 0),
+		"v4_addr_in_nx_info(%p[#%u]," NIPQUAD_FMT ",%04x) = %d",
+		nxi, nxi ? nxi->nx_id : 0, NIPQUAD(addr), tmask, ret);
+	return ret;
+}
+
+static inline
+int v4_nx_addr_match(struct nx_addr_v4 *nxa, struct nx_addr_v4 *addr, uint16_t mask)
+{
+	/* FIXME: needs full range checks */
+	return v4_addr_match(nxa, addr->ip[0].s_addr, mask);
+}
+
+static inline
+int v4_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v4 *nxa, uint16_t mask)
+{
+	struct nx_addr_v4 *ptr;
+
+	for (ptr = &nxi->v4; ptr; ptr = ptr->next)
+		if (v4_nx_addr_match(ptr, nxa, mask))
+			return 1;
+	return 0;
+}
+
+#include <net/inet_sock.h>
+
+/*
+ *	Check if a given address matches for a socket
+ *
+ *	nxi:		the socket's nx_info if any
+ *	addr:		address to be verified
+ */
+static inline
+int v4_sock_addr_match (
+	struct nx_info *nxi,
+	struct inet_sock *inet,
+	__be32 addr)
+{
+	__be32 saddr = inet->rcv_saddr;
+	__be32 bcast = nxi ? nxi->v4_bcast.s_addr : INADDR_BROADCAST;
+
+	if (addr && (saddr == addr || bcast == addr))
+		return 1;
+	if (!saddr)
+		return v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND);
+	return 0;
+}
+
+
+/* inet related checks and helpers */
+
+
+struct in_ifaddr;
+struct net_device;
+struct sock;
+
+#ifdef CONFIG_INET
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/inet_sock.h>
+#include <net/inet_timewait_sock.h>
+
+
+int dev_in_nx_info(struct net_device *, struct nx_info *);
+int v4_dev_in_nx_info(struct net_device *, struct nx_info *);
+int nx_v4_addr_conflict(struct nx_info *, struct nx_info *);
+
+
+/*
+ *	check if address is covered by socket
+ *
+ *	sk:	the socket to check against
+ *	addr:	the address in question (must be != 0)
+ */
+
+static inline
+int __v4_addr_match_socket(const struct sock *sk, struct nx_addr_v4 *nxa)
+{
+	struct nx_info *nxi = sk->sk_nx_info;
+	__be32 saddr = inet_rcv_saddr(sk);
+
+	vxdprintk(VXD_CBIT(net, 5),
+		"__v4_addr_in_socket(%p," NXAV4_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
+		sk, NXAV4(nxa), nxi, NIPQUAD(saddr), sk->sk_socket,
+		(sk->sk_socket?sk->sk_socket->flags:0));
+
+	if (saddr) {		/* direct address match */
+		return v4_addr_match(nxa, saddr, -1);
+	} else if (nxi) {	/* match against nx_info */
+		return v4_nx_addr_in_nx_info(nxi, nxa, -1);
+	} else {		/* unrestricted any socket */
+		return 1;
+	}
+}
+
+
+
+static inline
+int nx_dev_visible(struct nx_info *nxi, struct net_device *dev)
+{
+	vxdprintk(VXD_CBIT(net, 1), "nx_dev_visible(%p[#%u],%p »%s«) %d",
+		nxi, nxi ? nxi->nx_id : 0, dev, dev->name,
+		nxi ? dev_in_nx_info(dev, nxi) : 0);
+
+	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+		return 1;
+	if (dev_in_nx_info(dev, nxi))
+		return 1;
+	return 0;
+}
+
+
+static inline
+int v4_ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (!ifa)
+		return 0;
+	return v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW);
+}
+
+static inline
+int nx_v4_ifa_visible(struct nx_info *nxi, struct in_ifaddr *ifa)
+{
+	vxdprintk(VXD_CBIT(net, 1), "nx_v4_ifa_visible(%p[#%u],%p) %d",
+		nxi, nxi ? nxi->nx_id : 0, ifa,
+		nxi ? v4_ifa_in_nx_info(ifa, nxi) : 0);
+
+	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+		return 1;
+	if (v4_ifa_in_nx_info(ifa, nxi))
+		return 1;
+	return 0;
+}
+
+
+struct nx_v4_sock_addr {
+	__be32 saddr;	/* Address used for validation */
+	__be32 baddr;	/* Address used for socket bind */
+};
+
+static inline
+int v4_map_sock_addr(struct inet_sock *inet, struct sockaddr_in *addr,
+	struct nx_v4_sock_addr *nsa)
+{
+	struct sock *sk = &inet->sk;
+	struct nx_info *nxi = sk->sk_nx_info;
+	__be32 saddr = addr->sin_addr.s_addr;
+	__be32 baddr = saddr;
+
+	vxdprintk(VXD_CBIT(net, 3),
+		"inet_bind(%p)* %p,%p;%lx " NIPQUAD_FMT,
+		sk, sk->sk_nx_info, sk->sk_socket,
+		(sk->sk_socket ? sk->sk_socket->flags : 0),
+		NIPQUAD(saddr));
+
+	if (nxi) {
+		if (saddr == INADDR_ANY) {
+			if (nx_info_flags(nxi, NXF_SINGLE_IP, 0))
+				baddr = nxi->v4.ip[0].s_addr;
+		} else if (saddr == IPI_LOOPBACK) {
+			if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+				baddr = nxi->v4_lback.s_addr;
+		} else {	/* normal address bind */
+			if (!v4_addr_in_nx_info(nxi, saddr, NXA_MASK_BIND))
+				return -EADDRNOTAVAIL;
+		}
+	}
+
+	vxdprintk(VXD_CBIT(net, 3),
+		"inet_bind(%p) " NIPQUAD_FMT ", " NIPQUAD_FMT,
+		sk, NIPQUAD(saddr), NIPQUAD(baddr));
+
+	nsa->saddr = saddr;
+	nsa->baddr = baddr;
+	return 0;
+}
+
+static inline
+void v4_set_sock_addr(struct inet_sock *inet, struct nx_v4_sock_addr *nsa)
+{
+	inet->saddr = nsa->baddr;
+	inet->rcv_saddr = nsa->baddr;
+}
+
+
+/*
+ *      helper to simplify inet_lookup_listener
+ *
+ *      nxi:	the socket's nx_info if any
+ *      addr:	address to be verified
+ *      saddr:	socket address
+ */
+static inline int v4_inet_addr_match (
+	struct nx_info *nxi,
+	__be32 addr,
+	__be32 saddr)
+{
+	if (addr && (saddr == addr))
+		return 1;
+	if (!saddr)
+		return nxi ? v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND) : 1;
+	return 0;
+}
+
+static inline __be32 nx_map_sock_lback(struct nx_info *nxi, __be32 addr)
+{
+	if (nx_info_flags(nxi, NXF_HIDE_LBACK, 0) &&
+		(addr == nxi->v4_lback.s_addr))
+		return IPI_LOOPBACK;
+	return addr;
+}
+
+static inline
+int nx_info_has_v4(struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (NX_IPV4(nxi))
+		return 1;
+	if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+		return 1;
+	return 0;
+}
+
+#else /* CONFIG_INET */
+
+static inline
+int nx_dev_visible(struct nx_info *n, struct net_device *d)
+{
+	return 1;
+}
+
+static inline
+int nx_v4_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
+{
+	return 1;
+}
+
+static inline
+int v4_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
+{
+	return 1;
+}
+
+static inline
+int nx_info_has_v4(struct nx_info *nxi)
+{
+	return 0;
+}
+
+#endif /* CONFIG_INET */
+
+#define current_nx_info_has_v4() \
+	nx_info_has_v4(current_nx_info())
+
+#else
+// #warning duplicate inclusion
+#endif
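For the NXA_TYPE_MASK case, v4_addr_match() above reduces to the usual network-prefix test ip == (addr & mask). A tiny userspace illustration of that single case (the ADDR, RANGE and broadcast cases are omitted; the addresses are arbitrary examples):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* same comparison as the NXA_TYPE_MASK branch of v4_addr_match() above */
static int v4_mask_match(uint32_t ip, uint32_t mask, uint32_t addr)
{
	return ip == (addr & mask);
}

int main(void)
{
	uint32_t net  = inet_addr("192.168.1.0");
	uint32_t mask = inet_addr("255.255.255.0");

	printf("%d\n", v4_mask_match(net, mask, inet_addr("192.168.1.5")));	/* 1 */
	printf("%d\n", v4_mask_match(net, mask, inet_addr("10.0.0.1")));	/* 0 */
	return 0;
}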
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_limit.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_limit.h
--- linux-2.6.32.17/include/linux/vs_limit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_limit.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,140 @@
+#ifndef _VS_LIMIT_H
+#define _VS_LIMIT_H
+
+#include "vserver/limit.h"
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/context.h"
+#include "vserver/limit_int.h"
+
+
+#define vx_acc_cres(v, d, p, r) \
+	__vx_acc_cres(v, r, d, p, __FILE__, __LINE__)
+
+#define vx_acc_cres_cond(x, d, p, r) \
+	__vx_acc_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
+	r, d, p, __FILE__, __LINE__)
+
+
+#define vx_add_cres(v, a, p, r) \
+	__vx_add_cres(v, r, a, p, __FILE__, __LINE__)
+#define vx_sub_cres(v, a, p, r)		vx_add_cres(v, -(a), p, r)
+
+#define vx_add_cres_cond(x, a, p, r) \
+	__vx_add_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
+	r, a, p, __FILE__, __LINE__)
+#define vx_sub_cres_cond(x, a, p, r)	vx_add_cres_cond(x, -(a), p, r)
+
+
+/* process and file limits */
+
+#define vx_nproc_inc(p) \
+	vx_acc_cres((p)->vx_info, 1, p, RLIMIT_NPROC)
+
+#define vx_nproc_dec(p) \
+	vx_acc_cres((p)->vx_info,-1, p, RLIMIT_NPROC)
+
+#define vx_files_inc(f) \
+	vx_acc_cres_cond((f)->f_xid, 1, f, RLIMIT_NOFILE)
+
+#define vx_files_dec(f) \
+	vx_acc_cres_cond((f)->f_xid,-1, f, RLIMIT_NOFILE)
+
+#define vx_locks_inc(l) \
+	vx_acc_cres_cond((l)->fl_xid, 1, l, RLIMIT_LOCKS)
+
+#define vx_locks_dec(l) \
+	vx_acc_cres_cond((l)->fl_xid,-1, l, RLIMIT_LOCKS)
+
+#define vx_openfd_inc(f) \
+	vx_acc_cres(current_vx_info(), 1, (void *)(long)(f), VLIMIT_OPENFD)
+
+#define vx_openfd_dec(f) \
+	vx_acc_cres(current_vx_info(),-1, (void *)(long)(f), VLIMIT_OPENFD)
+
+
+#define vx_cres_avail(v, n, r) \
+	__vx_cres_avail(v, r, n, __FILE__, __LINE__)
+
+
+#define vx_nproc_avail(n) \
+	vx_cres_avail(current_vx_info(), n, RLIMIT_NPROC)
+
+#define vx_files_avail(n) \
+	vx_cres_avail(current_vx_info(), n, RLIMIT_NOFILE)
+
+#define vx_locks_avail(n) \
+	vx_cres_avail(current_vx_info(), n, RLIMIT_LOCKS)
+
+#define vx_openfd_avail(n) \
+	vx_cres_avail(current_vx_info(), n, VLIMIT_OPENFD)
+
+
+/* dentry limits */
+
+#define vx_dentry_inc(d) do {						\
+	if (atomic_read(&d->d_count) == 1)				\
+		vx_acc_cres(current_vx_info(), 1, d, VLIMIT_DENTRY);	\
+	} while (0)
+
+#define vx_dentry_dec(d) do {						\
+	if (atomic_read(&d->d_count) == 0)				\
+		vx_acc_cres(current_vx_info(),-1, d, VLIMIT_DENTRY);	\
+	} while (0)
+
+#define vx_dentry_avail(n) \
+	vx_cres_avail(current_vx_info(), n, VLIMIT_DENTRY)
+
+
+/* socket limits */
+
+#define vx_sock_inc(s) \
+	vx_acc_cres((s)->sk_vx_info, 1, s, VLIMIT_NSOCK)
+
+#define vx_sock_dec(s) \
+	vx_acc_cres((s)->sk_vx_info,-1, s, VLIMIT_NSOCK)
+
+#define vx_sock_avail(n) \
+	vx_cres_avail(current_vx_info(), n, VLIMIT_NSOCK)
+
+
+/* ipc resource limits */
+
+#define vx_ipcmsg_add(v, u, a) \
+	vx_add_cres(v, a, u, RLIMIT_MSGQUEUE)
+
+#define vx_ipcmsg_sub(v, u, a) \
+	vx_sub_cres(v, a, u, RLIMIT_MSGQUEUE)
+
+#define vx_ipcmsg_avail(v, a) \
+	vx_cres_avail(v, a, RLIMIT_MSGQUEUE)
+
+
+#define vx_ipcshm_add(v, k, a) \
+	vx_add_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
+
+#define vx_ipcshm_sub(v, k, a) \
+	vx_sub_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
+
+#define vx_ipcshm_avail(v, a) \
+	vx_cres_avail(v, a, VLIMIT_SHMEM)
+
+
+#define vx_semary_inc(a) \
+	vx_acc_cres(current_vx_info(), 1, a, VLIMIT_SEMARY)
+
+#define vx_semary_dec(a) \
+	vx_acc_cres(current_vx_info(), -1, a, VLIMIT_SEMARY)
+
+
+#define vx_nsems_add(a,n) \
+	vx_add_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
+
+#define vx_nsems_sub(a,n) \
+	vx_sub_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_memory.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_memory.h
--- linux-2.6.32.17/include/linux/vs_memory.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_memory.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,159 @@
+#ifndef _VS_MEMORY_H
+#define _VS_MEMORY_H
+
+#include "vserver/limit.h"
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/context.h"
+#include "vserver/limit_int.h"
+
+
+#define __acc_add_long(a, v)	(*(v) += (a))
+#define __acc_inc_long(v)	(++*(v))
+#define __acc_dec_long(v)	(--*(v))
+
+#if	NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#define __acc_add_atomic(a, v)	atomic_long_add(a, v)
+#define __acc_inc_atomic(v)	atomic_long_inc(v)
+#define __acc_dec_atomic(v)	atomic_long_dec(v)
+#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#define __acc_add_atomic(a, v)	__acc_add_long(a, v)
+#define __acc_inc_atomic(v)	__acc_inc_long(v)
+#define __acc_dec_atomic(v)	__acc_dec_long(v)
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
+
+#define vx_acc_page(m, d, v, r) do {					\
+	if ((d) > 0)							\
+		__acc_inc_long(&(m)->v);				\
+	else								\
+		__acc_dec_long(&(m)->v);				\
+	__vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__);	\
+} while (0)
+
+#define vx_acc_page_atomic(m, d, v, r) do {				\
+	if ((d) > 0)							\
+		__acc_inc_atomic(&(m)->v);				\
+	else								\
+		__acc_dec_atomic(&(m)->v);				\
+	__vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__);	\
+} while (0)
+
+
+#define vx_acc_pages(m, p, v, r) do {					\
+	unsigned long __p = (p);					\
+	__acc_add_long(__p, &(m)->v);					\
+	__vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__);	\
+} while (0)
+
+#define vx_acc_pages_atomic(m, p, v, r) do {				\
+	unsigned long __p = (p);					\
+	__acc_add_atomic(__p, &(m)->v);					\
+	__vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__);	\
+} while (0)
+
+
+
+#define vx_acc_vmpage(m, d) \
+	vx_acc_page(m, d, total_vm,  RLIMIT_AS)
+#define vx_acc_vmlpage(m, d) \
+	vx_acc_page(m, d, locked_vm, RLIMIT_MEMLOCK)
+#define vx_acc_file_rsspage(m, d) \
+	vx_acc_page_atomic(m, d, _file_rss, VLIMIT_MAPPED)
+#define vx_acc_anon_rsspage(m, d) \
+	vx_acc_page_atomic(m, d, _anon_rss, VLIMIT_ANON)
+
+#define vx_acc_vmpages(m, p) \
+	vx_acc_pages(m, p, total_vm,  RLIMIT_AS)
+#define vx_acc_vmlpages(m, p) \
+	vx_acc_pages(m, p, locked_vm, RLIMIT_MEMLOCK)
+#define vx_acc_file_rsspages(m, p) \
+	vx_acc_pages_atomic(m, p, _file_rss, VLIMIT_MAPPED)
+#define vx_acc_anon_rsspages(m, p) \
+	vx_acc_pages_atomic(m, p, _anon_rss, VLIMIT_ANON)
+
+#define vx_pages_add(s, r, p)	__vx_add_cres(s, r, p, 0, __FILE__, __LINE__)
+#define vx_pages_sub(s, r, p)	vx_pages_add(s, r, -(p))
+
+#define vx_vmpages_inc(m)		vx_acc_vmpage(m, 1)
+#define vx_vmpages_dec(m)		vx_acc_vmpage(m, -1)
+#define vx_vmpages_add(m, p)		vx_acc_vmpages(m, p)
+#define vx_vmpages_sub(m, p)		vx_acc_vmpages(m, -(p))
+
+#define vx_vmlocked_inc(m)		vx_acc_vmlpage(m, 1)
+#define vx_vmlocked_dec(m)		vx_acc_vmlpage(m, -1)
+#define vx_vmlocked_add(m, p)		vx_acc_vmlpages(m, p)
+#define vx_vmlocked_sub(m, p)		vx_acc_vmlpages(m, -(p))
+
+#define vx_file_rsspages_inc(m)		vx_acc_file_rsspage(m, 1)
+#define vx_file_rsspages_dec(m)		vx_acc_file_rsspage(m, -1)
+#define vx_file_rsspages_add(m, p)	vx_acc_file_rsspages(m, p)
+#define vx_file_rsspages_sub(m, p)	vx_acc_file_rsspages(m, -(p))
+
+#define vx_anon_rsspages_inc(m)		vx_acc_anon_rsspage(m, 1)
+#define vx_anon_rsspages_dec(m)		vx_acc_anon_rsspage(m, -1)
+#define vx_anon_rsspages_add(m, p)	vx_acc_anon_rsspages(m, p)
+#define vx_anon_rsspages_sub(m, p)	vx_acc_anon_rsspages(m, -(p))
+
+
+#define vx_pages_avail(m, p, r) \
+	__vx_cres_avail((m)->mm_vx_info, r, p, __FILE__, __LINE__)
+
+#define vx_vmpages_avail(m, p)	vx_pages_avail(m, p, RLIMIT_AS)
+#define vx_vmlocked_avail(m, p)	vx_pages_avail(m, p, RLIMIT_MEMLOCK)
+#define vx_anon_avail(m, p)	vx_pages_avail(m, p, VLIMIT_ANON)
+#define vx_mapped_avail(m, p)	vx_pages_avail(m, p, VLIMIT_MAPPED)
+
+#define vx_rss_avail(m, p) \
+	__vx_cres_array_avail((m)->mm_vx_info, VLA_RSS, p, __FILE__, __LINE__)
+
+
+enum {
+	VXPT_UNKNOWN = 0,
+	VXPT_ANON,
+	VXPT_NONE,
+	VXPT_FILE,
+	VXPT_SWAP,
+	VXPT_WRITE
+};
+
+#if 0
+#define	vx_page_fault(mm, vma, type, ret)
+#else
+
+static inline
+void __vx_page_fault(struct mm_struct *mm,
+	struct vm_area_struct *vma, int type, int ret)
+{
+	struct vx_info *vxi = mm->mm_vx_info;
+	int what;
+/*
+	static char *page_type[6] =
+		{ "UNKNOWN", "ANON", "NONE", "FILE", "SWAP", "WRITE" };
+	static char *page_what[4] =
+		{ "FAULT_OOM", "FAULT_SIGBUS", "FAULT_MINOR", "FAULT_MAJOR" };
+*/
+
+	if (!vxi)
+		return;
+
+	what = (ret & 0x3);
+
+/*	printk("[%d] page[%d][%d] %2x %s %s\n", vxi->vx_id,
+		type, what, ret, page_type[type], page_what[what]);
+*/
+	if (ret & VM_FAULT_WRITE)
+		what |= 0x4;
+	atomic_inc(&vxi->cacct.page[type][what]);
+}
+
+#define	vx_page_fault(mm, vma, type, ret)	__vx_page_fault(mm, vma, type, ret)
+#endif
+
+
+extern unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm);
+
+#else
+#warning duplicate inclusion
+#endif
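
The accounting macros above always do two updates per event: the per-mm counter itself (total_vm, locked_vm, _file_rss, _anon_rss) and the owning context's resource counter for the corresponding limit (RLIMIT_AS, RLIMIT_MEMLOCK, VLIMIT_MAPPED, VLIMIT_ANON), so a guest's aggregate memory use can be capped across all of its address spaces. A rough userspace model of that double bookkeeping, using invented names (ctx_model, mm_model) rather than the real vx_info/mm_struct API:

/* Illustrative model of the double bookkeeping above; all names are invented. */
#include <stdio.h>

struct ctx_model {                      /* stands in for the vx_info limits   */
	long rlim_as_used, rlim_as_max;
};

struct mm_model {                       /* stands in for the mm_struct fields */
	long total_vm;
	struct ctx_model *ctx;          /* like mm->mm_vx_info                */
};

/* like vx_vmpages_avail(): does the context still have headroom? */
static int ctx_pages_avail(const struct ctx_model *ctx, long pages)
{
	return ctx->rlim_as_used + pages <= ctx->rlim_as_max;
}

/* like vx_vmpages_add()/_sub(): charge the mm and its context together */
static void mm_vmpages_add(struct mm_model *mm, long pages)
{
	mm->total_vm += pages;
	if (mm->ctx)
		mm->ctx->rlim_as_used += pages;
}

int main(void)
{
	struct ctx_model ctx = { .rlim_as_used = 0, .rlim_as_max = 1024 };
	struct mm_model mm = { .total_vm = 0, .ctx = &ctx };

	if (ctx_pages_avail(&ctx, 256))
		mm_vmpages_add(&mm, 256);
	mm_vmpages_add(&mm, -64);       /* like vx_vmpages_sub(mm, 64) */

	printf("mm=%ld ctx=%ld/%ld\n", mm.total_vm,
	       ctx.rlim_as_used, ctx.rlim_as_max);
	return 0;
}
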
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_network.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_network.h
--- linux-2.6.32.17/include/linux/vs_network.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_network.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,169 @@
+#ifndef _NX_VS_NETWORK_H
+#define _NX_VS_NETWORK_H
+
+#include "vserver/context.h"
+#include "vserver/network.h"
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+#include <linux/sched.h>
+
+
+#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__)
+
+static inline struct nx_info *__get_nx_info(struct nx_info *nxi,
+	const char *_file, int _line)
+{
+	if (!nxi)
+		return NULL;
+
+	vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	atomic_inc(&nxi->nx_usecnt);
+	return nxi;
+}
+
+
+extern void free_nx_info(struct nx_info *);
+
+#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__)
+
+static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line)
+{
+	if (!nxi)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	if (atomic_dec_and_test(&nxi->nx_usecnt))
+		free_nx_info(nxi);
+}
+
+
+#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+		const char *_file, int _line)
+{
+	if (nxi) {
+		vxlprintk(VXD_CBIT(nid, 3),
+			"init_nx_info(%p[#%d.%d])",
+			nxi, nxi ? nxi->nx_id : 0,
+			nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+			_file, _line);
+
+		atomic_inc(&nxi->nx_usecnt);
+	}
+	*nxp = nxi;
+}
+
+
+#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+	const char *_file, int _line)
+{
+	struct nx_info *nxo;
+
+	if (!nxi)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	atomic_inc(&nxi->nx_usecnt);
+	nxo = xchg(nxp, nxi);
+	BUG_ON(nxo);
+}
+
+#define clr_nx_info(p) __clr_nx_info(p, __FILE__, __LINE__)
+
+static inline void __clr_nx_info(struct nx_info **nxp,
+	const char *_file, int _line)
+{
+	struct nx_info *nxo;
+
+	nxo = xchg(nxp, NULL);
+	if (!nxo)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])",
+		nxo, nxo ? nxo->nx_id : 0,
+		nxo ? atomic_read(&nxo->nx_usecnt) : 0,
+		_file, _line);
+
+	if (atomic_dec_and_test(&nxo->nx_usecnt))
+		free_nx_info(nxo);
+}
+
+
+#define claim_nx_info(v, p) __claim_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __claim_nx_info(struct nx_info *nxi,
+	struct task_struct *task, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		nxi ? atomic_read(&nxi->nx_tasks) : 0,
+		task, _file, _line);
+
+	atomic_inc(&nxi->nx_tasks);
+}
+
+
+extern void unhash_nx_info(struct nx_info *);
+
+#define release_nx_info(v, p) __release_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __release_nx_info(struct nx_info *nxi,
+	struct task_struct *task, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		nxi ? atomic_read(&nxi->nx_tasks) : 0,
+		task, _file, _line);
+
+	might_sleep();
+
+	if (atomic_dec_and_test(&nxi->nx_tasks))
+		unhash_nx_info(nxi);
+}
+
+
+#define task_get_nx_info(i)	__task_get_nx_info(i, __FILE__, __LINE__)
+
+static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p,
+	const char *_file, int _line)
+{
+	struct nx_info *nxi;
+
+	task_lock(p);
+	vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
+		p, _file, _line);
+	nxi = __get_nx_info(p->nx_info, _file, _line);
+	task_unlock(p);
+	return nxi;
+}
+
+
+static inline void exit_nx_info(struct task_struct *p)
+{
+	if (p->nx_info)
+		release_nx_info(p->nx_info, p);
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
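
All of the helpers in vs_network.h reduce to two reference counts on struct nx_info: nx_usecnt governs object lifetime (the last put calls free_nx_info), and nx_tasks counts attached tasks (the last release calls unhash_nx_info); the vxlprintk calls are only debug tracing. A minimal userspace analogue of the usecnt half, with an invented nxobj type and C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct nxobj {                           /* stand-in for struct nx_info */
	atomic_int usecnt;
	int id;
};

static struct nxobj *nxobj_get(struct nxobj *o)  /* like get_nx_info() */
{
	if (o)
		atomic_fetch_add(&o->usecnt, 1);
	return o;
}

static void nxobj_put(struct nxobj *o)           /* like put_nx_info() */
{
	if (!o)
		return;
	/* like atomic_dec_and_test() followed by free_nx_info() */
	if (atomic_fetch_sub(&o->usecnt, 1) == 1) {
		printf("freeing #%d\n", o->id);
		free(o);
	}
}

int main(void)
{
	struct nxobj *o = malloc(sizeof(*o));

	atomic_init(&o->usecnt, 1);      /* creator holds one reference */
	o->id = 42;

	struct nxobj *ref = nxobj_get(o);
	nxobj_put(ref);
	nxobj_put(o);                    /* last put frees the object   */
	return 0;
}
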
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_pid.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_pid.h
--- linux-2.6.32.17/include/linux/vs_pid.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_pid.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,95 @@
+#ifndef _VS_PID_H
+#define _VS_PID_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/pid.h"
+#include <linux/pid_namespace.h>
+
+
+#define VXF_FAKE_INIT	(VXF_INFO_INIT | VXF_STATE_INIT)
+
+static inline
+int vx_proc_task_visible(struct task_struct *task)
+{
+	if ((task->pid == 1) &&
+		!vx_flags(VXF_FAKE_INIT, VXF_FAKE_INIT))
+		/* show a blend through init */
+		goto visible;
+	if (vx_check(vx_task_xid(task), VS_WATCH | VS_IDENT))
+		goto visible;
+	return 0;
+visible:
+	return 1;
+}
+
+#define find_task_by_real_pid(pid) find_task_by_pid_ns(pid, &init_pid_ns)
+
+#if 0
+
+static inline
+struct task_struct *vx_find_proc_task_by_pid(int pid)
+{
+	struct task_struct *task = find_task_by_real_pid(pid);
+
+	if (task && !vx_proc_task_visible(task)) {
+		vxdprintk(VXD_CBIT(misc, 6),
+			"dropping task (find) %p[#%u,%u] for %p[#%u,%u]",
+			task, task->xid, task->pid,
+			current, current->xid, current->pid);
+		task = NULL;
+	}
+	return task;
+}
+
+#endif
+
+static inline
+struct task_struct *vx_get_proc_task(struct inode *inode, struct pid *pid)
+{
+	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
+
+	if (task && !vx_proc_task_visible(task)) {
+		vxdprintk(VXD_CBIT(misc, 6),
+			"dropping task (get) %p[#%u,%u] for %p[#%u,%u]",
+			task, task->xid, task->pid,
+			current, current->xid, current->pid);
+		put_task_struct(task);
+		task = NULL;
+	}
+	return task;
+}
+
+#if 0
+
+static inline
+struct task_struct *vx_child_reaper(struct task_struct *p)
+{
+	struct vx_info *vxi = p->vx_info;
+	struct task_struct *reaper = child_reaper(p);
+
+	if (!vxi)
+		goto out;
+
+	BUG_ON(!p->vx_info->vx_reaper);
+
+	/* child reaper for the guest reaper */
+	if (vxi->vx_reaper == p)
+		goto out;
+
+	reaper = vxi->vx_reaper;
+out:
+	vxdprintk(VXD_CBIT(xid, 7),
+		"vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]",
+		p, p->xid, p->pid, reaper, reaper->xid, reaper->pid);
+	return reaper;
+}
+
+#endif
+
+
+#else
+#warning duplicate inclusion
+#endif
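
vx_proc_task_visible() above is the /proc filter: a task is shown either because it is pid 1 and the current guest is not faking its own init (the "blend through init" case), or because the xid check vx_check(..., VS_WATCH | VS_IDENT) passes. Reduced to a plain predicate over precomputed inputs (a sketch, not the kernel helper):

#include <stdio.h>

/* Sketch of the vx_proc_task_visible() decision; inputs are precomputed. */
static int proc_task_visible(int task_pid, int same_or_watched_context,
			     int guest_fakes_init)
{
	if (task_pid == 1 && !guest_fakes_init)
		return 1;   /* blend the real init through       */
	if (same_or_watched_context)
		return 1;   /* VS_WATCH | VS_IDENT check matched */
	return 0;
}

int main(void)
{
	printf("%d\n", proc_task_visible(1, 0, 0));    /* 1: host init shown */
	printf("%d\n", proc_task_visible(1234, 0, 0)); /* 0: foreign task    */
	printf("%d\n", proc_task_visible(1234, 1, 0)); /* 1: same context    */
	return 0;
}
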
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_sched.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_sched.h
--- linux-2.6.32.17/include/linux/vs_sched.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_sched.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,110 @@
+#ifndef _VS_SCHED_H
+#define _VS_SCHED_H
+
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/sched.h"
+
+
+#define VAVAVOOM_RATIO		 50
+
+#define MAX_PRIO_BIAS		 20
+#define MIN_PRIO_BIAS		-20
+
+
+#ifdef CONFIG_VSERVER_HARDCPU
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into a -4 ... 0 ... +4 bonus/penalty range.
+ *
+ * Additionally, we scale another amount based on the number of
+ * CPU tokens currently held by the context, if the process is
+ * part of a context (and the appropriate SCHED flag is set).
+ * This ranges from -5 ... 0 ... +15, quadratically.
+ *
+ * So, the total bonus is -9 .. 0 .. +19
+ * We use ~50% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *    unless that context is far exceeding its CPU allocation.
+ *
+ * Both properties are important to certain workloads.
+ */
+static inline
+int vx_effective_vavavoom(struct _vx_sched_pc *sched_pc, int max_prio)
+{
+	int vavavoom, max;
+
+	/* lots of tokens = lots of vavavoom
+	 *      no tokens = no vavavoom      */
+	if ((vavavoom = sched_pc->tokens) >= 0) {
+		max = sched_pc->tokens_max;
+		vavavoom = max - vavavoom;
+		max = max * max;
+		vavavoom = max_prio * VAVAVOOM_RATIO / 100
+			* (vavavoom*vavavoom - (max >> 2)) / max;
+		return vavavoom;
+	}
+	return 0;
+}
+
+
+static inline
+int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
+{
+	struct vx_info *vxi = p->vx_info;
+	struct _vx_sched_pc *sched_pc;
+
+	if (!vxi)
+		return prio;
+
+	sched_pc = &vx_cpu(vxi, sched_pc);
+	if (vx_info_flags(vxi, VXF_SCHED_PRIO, 0)) {
+		int vavavoom = vx_effective_vavavoom(sched_pc, max_user);
+
+		sched_pc->vavavoom = vavavoom;
+		prio += vavavoom;
+	}
+	prio += sched_pc->prio_bias;
+	return prio;
+}
+
+#else /* !CONFIG_VSERVER_HARDCPU */
+
+static inline
+int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
+{
+	struct vx_info *vxi = p->vx_info;
+
+	if (vxi)
+		prio += vx_cpu(vxi, sched_pc).prio_bias;
+	return prio;
+}
+
+#endif /* CONFIG_VSERVER_HARDCPU */
+
+
+static inline void vx_account_user(struct vx_info *vxi,
+	cputime_t cputime, int nice)
+{
+	if (!vxi)
+		return;
+	vx_cpu(vxi, sched_pc).user_ticks += cputime;
+}
+
+static inline void vx_account_system(struct vx_info *vxi,
+	cputime_t cputime, int idle)
+{
+	if (!vxi)
+		return;
+	vx_cpu(vxi, sched_pc).sys_ticks += cputime;
+}
+
+#else
+#warning duplicate inclusion
+#endif
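
The token bonus computed by vx_effective_vavavoom() is quadratic in the number of spent tokens: with v = tokens_max - tokens, the adjustment is max_prio * VAVAVOOM_RATIO/100 * (v^2 - tokens_max^2/4) / tokens_max^2, which for max_prio = 40 runs from about -5 with a full bucket to +15 with an empty one, matching the -5 ... +15 range quoted in the comment. The arithmetic can be checked in isolation; the max_prio = 40 and bucket size below are assumed sample values, not taken from this patch:

#include <stdio.h>

#define VAVAVOOM_RATIO 50

/* Reimplementation of the vx_effective_vavavoom() arithmetic for inspection. */
static int vavavoom(int tokens, int tokens_max, int max_prio)
{
	int v, max;

	if (tokens < 0)
		return 0;
	v = tokens_max - tokens;
	max = tokens_max * tokens_max;
	return max_prio * VAVAVOOM_RATIO / 100
		* (v * v - (max >> 2)) / max;
}

int main(void)
{
	int max = 500;           /* arbitrary token bucket size (assumption) */

	for (int t = max; t >= 0; t -= 100)
		printf("tokens=%3d -> bonus %+d\n", t, vavavoom(t, max, 40));
	return 0;
}
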
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_socket.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_socket.h
--- linux-2.6.32.17/include/linux/vs_socket.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_socket.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,67 @@
+#ifndef _VS_SOCKET_H
+#define _VS_SOCKET_H
+
+#include "vserver/debug.h"
+#include "vserver/base.h"
+#include "vserver/cacct.h"
+#include "vserver/context.h"
+#include "vserver/tag.h"
+
+
+/* socket accounting */
+
+#include <linux/socket.h>
+
+static inline int vx_sock_type(int family)
+{
+	switch (family) {
+	case PF_UNSPEC:
+		return VXA_SOCK_UNSPEC;
+	case PF_UNIX:
+		return VXA_SOCK_UNIX;
+	case PF_INET:
+		return VXA_SOCK_INET;
+	case PF_INET6:
+		return VXA_SOCK_INET6;
+	case PF_PACKET:
+		return VXA_SOCK_PACKET;
+	default:
+		return VXA_SOCK_OTHER;
+	}
+}
+
+#define vx_acc_sock(v, f, p, s) \
+	__vx_acc_sock(v, f, p, s, __FILE__, __LINE__)
+
+static inline void __vx_acc_sock(struct vx_info *vxi,
+	int family, int pos, int size, char *file, int line)
+{
+	if (vxi) {
+		int type = vx_sock_type(family);
+
+		atomic_long_inc(&vxi->cacct.sock[type][pos].count);
+		atomic_long_add(size, &vxi->cacct.sock[type][pos].total);
+	}
+}
+
+#define vx_sock_recv(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, s)
+#define vx_sock_send(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, s)
+#define vx_sock_fail(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, s)
+
+
+#define sock_vx_init(s) do {		\
+	(s)->sk_xid = 0;		\
+	(s)->sk_vx_info = NULL;		\
+	} while (0)
+
+#define sock_nx_init(s) do {		\
+	(s)->sk_nid = 0;		\
+	(s)->sk_nx_info = NULL;		\
+	} while (0)
+
+#else
+#warning duplicate inclusion
+#endif
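
The socket hooks keep, per context, a (packet count, byte total) pair indexed by an address-family bucket and a direction slot, where slot 0 is receive, 1 is send and 2 is failure (vx_sock_recv/send/fail). A compact userspace model of that table, with plain longs in place of the kernel's atomic_long_t and invented enum names:

#include <stdio.h>

enum { SOCK_UNSPEC, SOCK_UNIX, SOCK_INET, SOCK_OTHER, SOCK_TYPES };
enum { POS_RECV, POS_SEND, POS_FAIL, POS_MAX };

struct sock_acct {                   /* like vxi->cacct.sock[type][pos] */
	long count;
	long total;
};

static struct sock_acct acct[SOCK_TYPES][POS_MAX];

static void acc_sock(int type, int pos, int size)
{
	acct[type][pos].count += 1;      /* atomic_long_inc() in the patch */
	acct[type][pos].total += size;   /* atomic_long_add() in the patch */
}

int main(void)
{
	acc_sock(SOCK_INET, POS_SEND, 1500);
	acc_sock(SOCK_INET, POS_SEND, 40);
	acc_sock(SOCK_INET, POS_RECV, 512);

	printf("inet tx: %ld pkts / %ld bytes\n",
	       acct[SOCK_INET][POS_SEND].count,
	       acct[SOCK_INET][POS_SEND].total);
	return 0;
}
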
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_tag.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_tag.h
--- linux-2.6.32.17/include/linux/vs_tag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_tag.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,47 @@
+#ifndef _VS_TAG_H
+#define _VS_TAG_H
+
+#include <linux/vserver/tag.h>
+
+/* check conditions */
+
+#define DX_ADMIN	0x0001
+#define DX_WATCH	0x0002
+#define DX_HOSTID	0x0008
+
+#define DX_IDENT	0x0010
+
+#define DX_ARG_MASK	0x0010
+
+
+#define dx_task_tag(t)	((t)->tag)
+
+#define dx_current_tag() dx_task_tag(current)
+
+#define dx_check(c, m)	__dx_check(dx_current_tag(), c, m)
+
+#define dx_weak_check(c, m)	((m) ? dx_check(c, m) : 1)
+
+
+/*
+ * check current context for ADMIN/WATCH and
+ * optionally against supplied argument
+ */
+static inline int __dx_check(tag_t cid, tag_t id, unsigned int mode)
+{
+	if (mode & DX_ARG_MASK) {
+		if ((mode & DX_IDENT) && (id == cid))
+			return 1;
+	}
+	return (((mode & DX_ADMIN) && (cid == 0)) ||
+		((mode & DX_WATCH) && (cid == 1)) ||
+		((mode & DX_HOSTID) && (id == 0)));
+}
+
+struct inode;
+int dx_permission(const struct inode *inode, int mask);
+
+
+#else
+#warning duplicate inclusion
+#endif
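
__dx_check() grants access when the caller's tag is the admin tag (0) or the watch tag (1), when the object carries the host tag (0) and DX_HOSTID was requested, or when DX_IDENT was requested and the tags match exactly. The logic is self-contained enough to exercise directly; the harness below copies it verbatim and only assumes that tag_t is an unsigned int:

#include <stdio.h>

typedef unsigned int tag_t;          /* assumption about the kernel typedef */

#define DX_ADMIN	0x0001
#define DX_WATCH	0x0002
#define DX_HOSTID	0x0008
#define DX_IDENT	0x0010
#define DX_ARG_MASK	0x0010

/* same logic as __dx_check() in vs_tag.h */
static int dx_check(tag_t cid, tag_t id, unsigned int mode)
{
	if (mode & DX_ARG_MASK) {
		if ((mode & DX_IDENT) && (id == cid))
			return 1;
	}
	return (((mode & DX_ADMIN) && (cid == 0)) ||
		((mode & DX_WATCH) && (cid == 1)) ||
		((mode & DX_HOSTID) && (id == 0)));
}

int main(void)
{
	printf("%d\n", dx_check(0, 7, DX_ADMIN));             /* 1: admin tag */
	printf("%d\n", dx_check(5, 5, DX_IDENT));             /* 1: same tag  */
	printf("%d\n", dx_check(5, 7, DX_IDENT | DX_HOSTID)); /* 0: no match  */
	return 0;
}
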
diff -NurpP --minimal linux-2.6.32.17/include/linux/vs_time.h linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_time.h
--- linux-2.6.32.17/include/linux/vs_time.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/linux/vs_time.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,19 @@
+#ifndef _VS_TIME_H
+#define _VS_TIME_H
+
+
+/* time faking stuff */
+
+#ifdef CONFIG_VSERVER_VTIME
+
+extern void vx_gettimeofday(struct timeval *tv);
+extern int vx_settimeofday(struct timespec *ts);
+
+#else
+#define	vx_gettimeofday(t)	do_gettimeofday(t)
+#define	vx_settimeofday(t)	do_settimeofday(t)
+#endif
+
+#else
+#warning duplicate inclusion
+#endif
diff -NurpP --minimal linux-2.6.32.17/include/net/addrconf.h linux-2.6.32.17-vs2.3.0.36.29.4/include/net/addrconf.h
--- linux-2.6.32.17/include/net/addrconf.h	2009-12-03 20:02:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/net/addrconf.h	2009-12-03 20:04:56.000000000 +0100
@@ -84,7 +84,8 @@ extern int			ipv6_dev_get_saddr(struct n
 					       struct net_device *dev,
 					       const struct in6_addr *daddr,
 					       unsigned int srcprefs,
-					       struct in6_addr *saddr);
+					       struct in6_addr *saddr,
+					       struct nx_info *nxi);
 extern int			ipv6_get_lladdr(struct net_device *dev,
 						struct in6_addr *addr,
 						unsigned char banned_flags);
diff -NurpP --minimal linux-2.6.32.17/include/net/af_unix.h linux-2.6.32.17-vs2.3.0.36.29.4/include/net/af_unix.h
--- linux-2.6.32.17/include/net/af_unix.h	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/net/af_unix.h	2009-12-03 20:04:56.000000000 +0100
@@ -4,6 +4,7 @@
 #include <linux/socket.h>
 #include <linux/un.h>
 #include <linux/mutex.h>
+#include <linux/vs_base.h>
 #include <net/sock.h>
 
 extern void unix_inflight(struct file *fp);
diff -NurpP --minimal linux-2.6.32.17/include/net/inet_timewait_sock.h linux-2.6.32.17-vs2.3.0.36.29.4/include/net/inet_timewait_sock.h
--- linux-2.6.32.17/include/net/inet_timewait_sock.h	2009-12-03 20:02:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/net/inet_timewait_sock.h	2009-12-03 20:04:56.000000000 +0100
@@ -117,6 +117,10 @@ struct inet_timewait_sock {
 #define tw_hash			__tw_common.skc_hash
 #define tw_prot			__tw_common.skc_prot
 #define tw_net			__tw_common.skc_net
+#define tw_xid			__tw_common.skc_xid
+#define tw_vx_info		__tw_common.skc_vx_info
+#define tw_nid			__tw_common.skc_nid
+#define tw_nx_info		__tw_common.skc_nx_info
 	int			tw_timeout;
 	volatile unsigned char	tw_substate;
 	/* 3 bits hole, try to pack */
diff -NurpP --minimal linux-2.6.32.17/include/net/route.h linux-2.6.32.17-vs2.3.0.36.29.4/include/net/route.h
--- linux-2.6.32.17/include/net/route.h	2009-09-10 15:26:27.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/net/route.h	2009-12-03 20:04:56.000000000 +0100
@@ -135,6 +135,9 @@ static inline void ip_rt_put(struct rtab
 		dst_release(&rt->u.dst);
 }
 
+#include <linux/vs_base.h>
+#include <linux/vs_inet.h>
+
 #define IPTOS_RT_MASK	(IPTOS_TOS_MASK & ~3)
 
 extern const __u8 ip_tos2prio[16];
@@ -144,6 +147,9 @@ static inline char rt_tos2priority(u8 to
 	return ip_tos2prio[IPTOS_TOS(tos)>>1];
 }
 
+extern int ip_v4_find_src(struct net *net, struct nx_info *,
+	struct rtable **, struct flowi *);
+
 static inline int ip_route_connect(struct rtable **rp, __be32 dst,
 				   __be32 src, u32 tos, int oif, u8 protocol,
 				   __be16 sport, __be16 dport, struct sock *sk,
@@ -161,11 +167,24 @@ static inline int ip_route_connect(struc
 
 	int err;
 	struct net *net = sock_net(sk);
+	struct nx_info *nx_info = current_nx_info();
 
 	if (inet_sk(sk)->transparent)
 		fl.flags |= FLOWI_FLAG_ANYSRC;
 
-	if (!dst || !src) {
+	if (sk)
+		nx_info = sk->sk_nx_info;
+
+	vxdprintk(VXD_CBIT(net, 4),
+		"ip_route_connect(%p) %p,%p;%lx",
+		sk, nx_info, sk->sk_socket,
+		(sk->sk_socket?sk->sk_socket->flags:0));
+
+	err = ip_v4_find_src(net, nx_info, rp, &fl);
+	if (err)
+		return err;
+
+	if (!fl.fl4_dst || !fl.fl4_src) {
 		err = __ip_route_output_key(net, rp, &fl);
 		if (err)
 			return err;
diff -NurpP --minimal linux-2.6.32.17/include/net/sock.h linux-2.6.32.17-vs2.3.0.36.29.4/include/net/sock.h
--- linux-2.6.32.17/include/net/sock.h	2009-12-03 20:02:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/include/net/sock.h	2009-12-03 20:04:56.000000000 +0100
@@ -139,6 +139,10 @@ struct sock_common {
 #ifdef CONFIG_NET_NS
 	struct net	 	*skc_net;
 #endif
+	xid_t			skc_xid;
+	struct vx_info		*skc_vx_info;
+	nid_t			skc_nid;
+	struct nx_info		*skc_nx_info;
 };
 
 /**
@@ -225,6 +229,10 @@ struct sock {
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
+#define sk_xid			__sk_common.skc_xid
+#define sk_vx_info		__sk_common.skc_vx_info
+#define sk_nid			__sk_common.skc_nid
+#define sk_nx_info		__sk_common.skc_nx_info
 	kmemcheck_bitfield_begin(flags);
 	unsigned int		sk_shutdown  : 2,
 				sk_no_check  : 2,
diff -NurpP --minimal linux-2.6.32.17/init/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/init/Kconfig
--- linux-2.6.32.17/init/Kconfig	2009-12-03 20:02:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/init/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -477,6 +477,19 @@ config CGROUP_SCHED
 
 endchoice
 
+config CFS_HARD_LIMITS
+	bool "Hard Limits for CFS Group Scheduler"
+	depends on EXPERIMENTAL
+	depends on FAIR_GROUP_SCHED && CGROUP_SCHED
+	default n
+	help
+	  This option enables hard limiting of CPU time obtained by
+	  a fair task group. Use this if you want to throttle a group of tasks
+	  based on its CPU usage. For more details refer to
+	  Documentation/scheduler/sched-cfs-hard-limits.txt
+
+	  Say N if unsure.
+
 menuconfig CGROUPS
 	boolean "Control Group support"
 	help
diff -NurpP --minimal linux-2.6.32.17/init/main.c linux-2.6.32.17-vs2.3.0.36.29.4/init/main.c
--- linux-2.6.32.17/init/main.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/init/main.c	2010-04-05 20:56:22.000000000 +0200
@@ -70,6 +70,7 @@
 #include <linux/sfi.h>
 #include <linux/shmem_fs.h>
 #include <trace/boot.h>
+#include <linux/vserver/percpu.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
diff -NurpP --minimal linux-2.6.32.17/ipc/mqueue.c linux-2.6.32.17-vs2.3.0.36.29.4/ipc/mqueue.c
--- linux-2.6.32.17/ipc/mqueue.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/ipc/mqueue.c	2010-04-05 20:56:22.000000000 +0200
@@ -33,6 +33,8 @@
 #include <linux/pid.h>
 #include <linux/ipc_namespace.h>
 #include <linux/ima.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
 
 #include <net/sock.h>
 #include "util.h"
@@ -66,6 +68,7 @@ struct mqueue_inode_info {
 	struct sigevent notify;
 	struct pid* notify_owner;
 	struct user_struct *user;	/* user who created, for accounting */
+	struct vx_info *vxi;
 	struct sock *notify_sock;
 	struct sk_buff *notify_cookie;
 
@@ -125,6 +128,7 @@ static struct inode *mqueue_get_inode(st
 		if (S_ISREG(mode)) {
 			struct mqueue_inode_info *info;
 			struct task_struct *p = current;
+			struct vx_info *vxi = p->vx_info;
 			unsigned long mq_bytes, mq_msg_tblsz;
 
 			inode->i_fop = &mqueue_file_operations;
@@ -139,6 +143,7 @@ static struct inode *mqueue_get_inode(st
 			info->notify_owner = NULL;
 			info->qsize = 0;
 			info->user = NULL;	/* set when all is ok */
+			info->vxi = NULL;
 			memset(&info->attr, 0, sizeof(info->attr));
 			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
 			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
@@ -153,22 +158,26 @@ static struct inode *mqueue_get_inode(st
 			spin_lock(&mq_lock);
 			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
 		 	    u->mq_bytes + mq_bytes >
-			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
+			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur ||
+			    !vx_ipcmsg_avail(vxi, mq_bytes)) {
 				spin_unlock(&mq_lock);
 				goto out_inode;
 			}
 			u->mq_bytes += mq_bytes;
+			vx_ipcmsg_add(vxi, u, mq_bytes);
 			spin_unlock(&mq_lock);
 
 			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
 			if (!info->messages) {
 				spin_lock(&mq_lock);
 				u->mq_bytes -= mq_bytes;
+				vx_ipcmsg_sub(vxi, u, mq_bytes);
 				spin_unlock(&mq_lock);
 				goto out_inode;
 			}
 			/* all is ok */
 			info->user = get_uid(u);
+			info->vxi = get_vx_info(vxi);
 		} else if (S_ISDIR(mode)) {
 			inc_nlink(inode);
 			/* Some things misbehave if size == 0 on a directory */
@@ -269,8 +278,11 @@ static void mqueue_delete_inode(struct i
 		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
 	user = info->user;
 	if (user) {
+		struct vx_info *vxi = info->vxi;
+
 		spin_lock(&mq_lock);
 		user->mq_bytes -= mq_bytes;
+		vx_ipcmsg_sub(vxi, user, mq_bytes);
 		/*
 		 * get_ns_from_inode() ensures that the
 		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
@@ -280,6 +292,7 @@ static void mqueue_delete_inode(struct i
 		if (ipc_ns)
 			ipc_ns->mq_queues_count--;
 		spin_unlock(&mq_lock);
+		put_vx_info(vxi);
 		free_uid(user);
 	}
 	if (ipc_ns)
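
The mqueue changes mirror the existing per-user RLIMIT_MSGQUEUE accounting with a per-context charge: availability is tested under mq_lock together with the user limit, vx_ipcmsg_add() records the charge on success, and vx_ipcmsg_sub() rolls it back both on the kmalloc failure path and when the inode is deleted. The invariant is that every exit after the charge passes through a matching uncharge; schematically (invented charge()/uncharge() helpers, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Schematic of the charge-then-rollback discipline in mqueue_get_inode(). */
struct quota { long used, max; };

static int charge(struct quota *q, long amount)
{
	if (q->used + amount > q->max)
		return -1;               /* like !vx_ipcmsg_avail() */
	q->used += amount;               /* like vx_ipcmsg_add()    */
	return 0;
}

static void uncharge(struct quota *q, long amount)
{
	q->used -= amount;               /* like vx_ipcmsg_sub()    */
}

static void *create_queue(struct quota *user, struct quota *ctx, long bytes)
{
	void *msgs;

	if (charge(user, bytes) < 0)
		return NULL;
	if (charge(ctx, bytes) < 0) {    /* both limits must agree  */
		uncharge(user, bytes);
		return NULL;
	}
	msgs = malloc(64);
	if (!msgs) {                     /* allocation failure: roll back both */
		uncharge(ctx, bytes);
		uncharge(user, bytes);
		return NULL;
	}
	return msgs;
}

int main(void)
{
	struct quota user = { 0, 8192 }, ctx = { 0, 4096 };
	void *q = create_queue(&user, &ctx, 2048);

	printf("created=%d user=%ld ctx=%ld\n", q != NULL, user.used, ctx.used);
	free(q);
	return 0;
}
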
diff -NurpP --minimal linux-2.6.32.17/ipc/msg.c linux-2.6.32.17-vs2.3.0.36.29.4/ipc/msg.c
--- linux-2.6.32.17/ipc/msg.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/ipc/msg.c	2010-02-12 10:59:55.000000000 +0100
@@ -38,6 +38,7 @@
 #include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
+#include <linux/vs_base.h>
 
 #include <asm/current.h>
 #include <asm/uaccess.h>
@@ -191,6 +192,7 @@ static int newque(struct ipc_namespace *
 
 	msq->q_perm.mode = msgflg & S_IRWXUGO;
 	msq->q_perm.key = key;
+	msq->q_perm.xid = vx_current_xid();
 
 	msq->q_perm.security = NULL;
 	retval = security_msg_queue_alloc(msq);
diff -NurpP --minimal linux-2.6.32.17/ipc/namespace.c linux-2.6.32.17-vs2.3.0.36.29.4/ipc/namespace.c
--- linux-2.6.32.17/ipc/namespace.c	2009-09-10 15:26:27.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/ipc/namespace.c	2009-12-03 20:04:56.000000000 +0100
@@ -11,6 +11,8 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
+#include <linux/vs_base.h>
+#include <linux/vserver/global.h>
 
 #include "util.h"
 
diff -NurpP --minimal linux-2.6.32.17/ipc/sem.c linux-2.6.32.17-vs2.3.0.36.29.4/ipc/sem.c
--- linux-2.6.32.17/ipc/sem.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/ipc/sem.c	2010-02-12 10:59:55.000000000 +0100
@@ -83,6 +83,8 @@
 #include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
 
 #include <asm/uaccess.h>
 #include "util.h"
@@ -256,6 +258,7 @@ static int newary(struct ipc_namespace *
 
 	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 	sma->sem_perm.key = key;
+	sma->sem_perm.xid = vx_current_xid();
 
 	sma->sem_perm.security = NULL;
 	retval = security_sem_alloc(sma);
@@ -271,6 +274,9 @@ static int newary(struct ipc_namespace *
 		return id;
 	}
 	ns->used_sems += nsems;
+	/* FIXME: obsoleted? */
+	vx_semary_inc(sma);
+	vx_nsems_add(sma, nsems);
 
 	sma->sem_base = (struct sem *) &sma[1];
 	INIT_LIST_HEAD(&sma->sem_pending);
@@ -547,6 +553,9 @@ static void freeary(struct ipc_namespace
 	sem_unlock(sma);
 
 	ns->used_sems -= sma->sem_nsems;
+	/* FIXME: obsoleted? */
+	vx_nsems_sub(sma, sma->sem_nsems);
+	vx_semary_dec(sma);
 	security_sem_free(sma);
 	ipc_rcu_putref(sma);
 }
diff -NurpP --minimal linux-2.6.32.17/ipc/shm.c linux-2.6.32.17-vs2.3.0.36.29.4/ipc/shm.c
--- linux-2.6.32.17/ipc/shm.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/ipc/shm.c	2010-02-12 10:59:55.000000000 +0100
@@ -40,6 +40,8 @@
 #include <linux/mount.h>
 #include <linux/ipc_namespace.h>
 #include <linux/ima.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
 
 #include <asm/uaccess.h>
 
@@ -170,7 +172,12 @@ static void shm_open(struct vm_area_stru
  */
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 {
-	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	struct vx_info *vxi = lookup_vx_info(shp->shm_perm.xid);
+	int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	vx_ipcshm_sub(vxi, shp, numpages);
+	ns->shm_tot -= numpages;
+
 	shm_rmid(ns, shp);
 	shm_unlock(shp);
 	if (!is_file_hugepages(shp->shm_file))
@@ -180,6 +187,7 @@ static void shm_destroy(struct ipc_names
 						shp->mlock_user);
 	fput (shp->shm_file);
 	security_shm_free(shp);
+	put_vx_info(vxi);
 	ipc_rcu_putref(shp);
 }
 
@@ -350,11 +358,15 @@ static int newseg(struct ipc_namespace *
 	if (ns->shm_tot + numpages > ns->shm_ctlall)
 		return -ENOSPC;
 
+	if (!vx_ipcshm_avail(current_vx_info(), numpages))
+		return -ENOSPC;
+
 	shp = ipc_rcu_alloc(sizeof(*shp));
 	if (!shp)
 		return -ENOMEM;
 
 	shp->shm_perm.key = key;
+	shp->shm_perm.xid = vx_current_xid();
 	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 	shp->mlock_user = NULL;
 
@@ -408,6 +420,7 @@ static int newseg(struct ipc_namespace *
 	ns->shm_tot += numpages;
 	error = shp->shm_perm.id;
 	shm_unlock(shp);
+	vx_ipcshm_add(current_vx_info(), key, numpages);
 	return error;
 
 no_id:
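
For SysV shared memory the charge is tied to the creating context: newseg() checks vx_ipcshm_avail() against current_vx_info() and charges it after the segment is set up, while shm_destroy() looks the context up again by the xid stored in shm_perm.xid, so the uncharge lands on the creator even if the segment is destroyed from elsewhere. A toy model of that by-xid uncharge (invented names, an array indexed by xid instead of lookup_vx_info()):

#include <stdio.h>

#define MAX_CTX 4

static long ctx_pages[MAX_CTX];          /* per-context page counters */

struct segment { int xid; long pages; };

static struct segment seg_create(int creator_xid, long pages)
{
	struct segment s = { creator_xid, pages };

	ctx_pages[creator_xid] += pages;         /* like vx_ipcshm_add()  */
	return s;
}

static void seg_destroy(struct segment *s)
{
	ctx_pages[s->xid] -= s->pages;           /* charged back by xid,  */
	s->pages = 0;                            /* not by the caller     */
}

int main(void)
{
	struct segment s = seg_create(2, 16);    /* created in guest 2    */

	seg_destroy(&s);                         /* destroyed from anywhere */
	printf("ctx2=%ld\n", ctx_pages[2]);      /* back to 0             */
	return 0;
}
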
diff -NurpP --minimal linux-2.6.32.17/kernel/capability.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/capability.c
--- linux-2.6.32.17/kernel/capability.c	2009-03-24 14:22:44.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/capability.c	2009-12-03 20:04:56.000000000 +0100
@@ -14,6 +14,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/pid_namespace.h>
+#include <linux/vs_context.h>
 #include <asm/uaccess.h>
 #include "cred-internals.h"
 
@@ -122,6 +123,7 @@ static int cap_validate_magic(cap_user_h
 	return 0;
 }
 
+
 /*
  * The only thing that can change the capabilities of the current
  * process is the current process. As such, we can't be in this code
@@ -289,6 +291,8 @@ error:
 	return ret;
 }
 
+#include <linux/vserver/base.h>
+
 /**
  * capable - Determine if the current task has a superior capability in effect
  * @cap: The capability to be tested for
@@ -301,6 +305,9 @@ error:
  */
 int capable(int cap)
 {
+	/* here for now so we don't require task locking */
+	if (vs_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap))
+		return 0;
 	if (unlikely(!cap_valid(cap))) {
 		printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
 		BUG();
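
The hook added to capable() denies a capability early when it falls inside the context capability mask (VXC_CAP_MASK) but is not granted to the current context via vx_mcaps(); anything outside the mask falls through to the normal checks. As a pair of bitmasks the rule looks like this (a sketch with illustrative capability numbers, not the kernel's data structures):

#include <stdio.h>

/* Sketch of per-context capability masking.
 * maskable: bits subject to the context mask at all (like VXC_CAP_MASK);
 * granted:  bits this particular context actually holds. */
static int ctx_capable(unsigned long maskable, unsigned long granted, int cap)
{
	unsigned long bit = 1UL << cap;

	if ((maskable & bit) && !(granted & bit))
		return 0;   /* maskable and not granted: denied here     */
	return 1;           /* otherwise fall through to the usual check */
}

int main(void)
{
	enum { CAP_NET_ADMIN = 12, CAP_SYS_TIME = 25 };
	unsigned long maskable = (1UL << CAP_NET_ADMIN) | (1UL << CAP_SYS_TIME);
	unsigned long granted  = (1UL << CAP_NET_ADMIN);

	printf("net_admin: %d\n", ctx_capable(maskable, granted, CAP_NET_ADMIN));
	printf("sys_time:  %d\n", ctx_capable(maskable, granted, CAP_SYS_TIME));
	return 0;
}
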
diff -NurpP --minimal linux-2.6.32.17/kernel/compat.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/compat.c
--- linux-2.6.32.17/kernel/compat.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/compat.c	2010-07-08 02:22:31.000000000 +0200
@@ -899,7 +899,7 @@ asmlinkage long compat_sys_time(compat_t
 	compat_time_t i;
 	struct timeval tv;
 
-	do_gettimeofday(&tv);
+	vx_gettimeofday(&tv);
 	i = tv.tv_sec;
 
 	if (tloc) {
@@ -924,7 +924,7 @@ asmlinkage long compat_sys_stime(compat_
 	if (err)
 		return err;
 
-	do_settimeofday(&tv);
+	vx_settimeofday(&tv);
 	return 0;
 }
 
diff -NurpP --minimal linux-2.6.32.17/kernel/exit.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/exit.c
--- linux-2.6.32.17/kernel/exit.c	2009-12-03 20:02:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/exit.c	2009-12-03 20:04:56.000000000 +0100
@@ -48,6 +48,10 @@
 #include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <linux/perf_event.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_pid.h>
 #include <trace/events/sched.h>
 
 #include <asm/uaccess.h>
@@ -488,9 +492,11 @@ static void close_files(struct files_str
 					filp_close(file, files);
 					cond_resched();
 				}
+				vx_openfd_dec(i);
 			}
 			i++;
 			set >>= 1;
+			cond_resched();
 		}
 	}
 }
@@ -1011,11 +1017,16 @@ NORET_TYPE void do_exit(long code)
 
 	validate_creds_for_do_exit(tsk);
 
+	/* needs to stay after exit_notify() */
+	exit_vx_info(tsk, code);
+	exit_nx_info(tsk);
+
 	preempt_disable();
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	schedule();
+	printk("bad task: %p [%lx]\n", current, current->state);
 	BUG();
 	/* Avoid "noreturn function does return".  */
 	for (;;)
diff -NurpP --minimal linux-2.6.32.17/kernel/fork.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/fork.c
--- linux-2.6.32.17/kernel/fork.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/fork.c	2010-05-27 19:21:36.000000000 +0200
@@ -64,6 +64,10 @@
 #include <linux/magic.h>
 #include <linux/perf_event.h>
 #include <linux/posix-timers.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_memory.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -151,6 +155,8 @@ void free_task(struct task_struct *tsk)
 	account_kernel_stack(tsk->stack, -1);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
+	clr_vx_info(&tsk->vx_info);
+	clr_nx_info(&tsk->nx_info);
 	ftrace_graph_exit_task(tsk);
 	free_task_struct(tsk);
 }
@@ -296,6 +302,8 @@ static int dup_mmap(struct mm_struct *mm
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
+	__set_mm_counter(mm, file_rss, 0);
+	__set_mm_counter(mm, anon_rss, 0);
 	cpumask_clear(mm_cpumask(mm));
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
@@ -310,7 +318,7 @@ static int dup_mmap(struct mm_struct *mm
 
 		if (mpnt->vm_flags & VM_DONTCOPY) {
 			long pages = vma_pages(mpnt);
-			mm->total_vm -= pages;
+			vx_vmpages_sub(mm, pages);
 			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
 								-pages);
 			continue;
@@ -452,8 +460,8 @@ static struct mm_struct * mm_init(struct
 		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
 	mm->core_state = NULL;
 	mm->nr_ptes = 0;
-	set_mm_counter(mm, file_rss, 0);
-	set_mm_counter(mm, anon_rss, 0);
+	__set_mm_counter(mm, file_rss, 0);
+	__set_mm_counter(mm, anon_rss, 0);
 	spin_lock_init(&mm->page_table_lock);
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->cached_hole_size = ~0UL;
@@ -463,6 +471,7 @@ static struct mm_struct * mm_init(struct
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
 		mmu_notifier_mm_init(mm);
+		set_vx_info(&mm->mm_vx_info, p->vx_info);
 		return mm;
 	}
 
@@ -496,6 +505,7 @@ void __mmdrop(struct mm_struct *mm)
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
+	clr_vx_info(&mm->mm_vx_info);
 	free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -631,6 +641,7 @@ struct mm_struct *dup_mm(struct task_str
 		goto fail_nomem;
 
 	memcpy(mm, oldmm, sizeof(*mm));
+	mm->mm_vx_info = NULL;
 
 	/* Initializing for Swap token stuff */
 	mm->token_priority = 0;
@@ -669,6 +680,7 @@ fail_nocontext:
 	 * If init_new_context() failed, we cannot use mmput() to free the mm
 	 * because it calls destroy_context()
 	 */
+	clr_vx_info(&mm->mm_vx_info);
 	mm_free_pgd(mm);
 	free_mm(mm);
 	return NULL;
@@ -980,6 +992,8 @@ static struct task_struct *copy_process(
 	int retval;
 	struct task_struct *p;
 	int cgroup_callbacks_done = 0;
+	struct vx_info *vxi;
+	struct nx_info *nxi;
 
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
@@ -1026,12 +1040,28 @@ static struct task_struct *copy_process(
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
+	init_vx_info(&p->vx_info, current_vx_info());
+	init_nx_info(&p->nx_info, current_nx_info());
+
+	/* check vserver memory */
+	if (p->mm && !(clone_flags & CLONE_VM)) {
+		if (vx_vmpages_avail(p->mm, p->mm->total_vm))
+			vx_pages_add(p->vx_info, RLIMIT_AS, p->mm->total_vm);
+		else
+			goto bad_fork_free;
+	}
+	if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
+		if (!vx_rss_avail(p->mm, get_mm_counter(p->mm, file_rss)))
+			goto bad_fork_cleanup_vm;
+	}
 	retval = -EAGAIN;
+	if (!vx_nproc_avail(1))
+		goto bad_fork_cleanup_vm;
 	if (atomic_read(&p->real_cred->user->processes) >=
 			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
 		    p->real_cred->user != INIT_USER)
-			goto bad_fork_free;
+			goto bad_fork_cleanup_vm;
 	}
 
 	retval = copy_creds(p, clone_flags);
@@ -1298,6 +1328,18 @@ static struct task_struct *copy_process(
 
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
+
+	/* p is copy of current */
+	vxi = p->vx_info;
+	if (vxi) {
+		claim_vx_info(vxi, p);
+		atomic_inc(&vxi->cvirt.nr_threads);
+		atomic_inc(&vxi->cvirt.total_forks);
+		vx_nproc_inc(p);
+	}
+	nxi = p->nx_info;
+	if (nxi)
+		claim_nx_info(nxi, p);
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
@@ -1339,6 +1381,9 @@ bad_fork_cleanup_cgroup:
 bad_fork_cleanup_count:
 	atomic_dec(&p->cred->user->processes);
 	exit_creds(p);
+bad_fork_cleanup_vm:
+	if (p->mm && !(clone_flags & CLONE_VM))
+		vx_pages_sub(p->vx_info, RLIMIT_AS, p->mm->total_vm);
 bad_fork_free:
 	free_task(p);
 fork_out:
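
In copy_process() the context work is split into three phases: headroom checks and an up-front RLIMIT_AS charge before the expensive copies (vx_vmpages_avail, vx_rss_avail, vx_nproc_avail), the actual claim once the child is fully constructed (claim_vx_info, the cvirt fork counters, vx_nproc_inc), and a dedicated unwind label, bad_fork_cleanup_vm, that returns the charged pages on any later failure. The property worth noting is that every error path taken after the charge runs through the matching subtraction; schematically (illustrative names and error handling only):

#include <stdio.h>

/* Schematic of the fork-time charge / claim / unwind ordering. */
struct ctx { long as_pages, as_limit; int nproc, nproc_limit; };

static int do_fork_model(struct ctx *c, long child_pages, int fail_later)
{
	/* phase 1: cheap availability checks plus up-front charge */
	if (c->as_pages + child_pages > c->as_limit)
		return -1;
	c->as_pages += child_pages;      /* like vx_pages_add(..., RLIMIT_AS, ...) */

	if (c->nproc >= c->nproc_limit || fail_later)
		goto cleanup_vm;         /* any later failure unwinds the charge   */

	/* phase 2: child fully set up, claim it for the context */
	c->nproc++;                      /* like vx_nproc_inc()                    */
	return 0;

cleanup_vm:                              /* like bad_fork_cleanup_vm               */
	c->as_pages -= child_pages;
	return -1;
}

int main(void)
{
	struct ctx c = { 0, 1000, 0, 64 };

	printf("ok=%d pages=%ld\n", do_fork_model(&c, 100, 0), c.as_pages);
	printf("ok=%d pages=%ld\n", do_fork_model(&c, 100, 1), c.as_pages);
	return 0;
}
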
diff -NurpP --minimal linux-2.6.32.17/kernel/kthread.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/kthread.c
--- linux-2.6.32.17/kernel/kthread.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/kthread.c	2010-04-05 20:56:22.000000000 +0200
@@ -14,6 +14,7 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/vs_pid.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
diff -NurpP --minimal linux-2.6.32.17/kernel/Makefile linux-2.6.32.17-vs2.3.0.36.29.4/kernel/Makefile
--- linux-2.6.32.17/kernel/Makefile	2009-12-03 20:02:57.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/Makefile	2009-12-03 20:04:56.000000000 +0100
@@ -23,6 +23,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 endif
 
+obj-y += vserver/
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
diff -NurpP --minimal linux-2.6.32.17/kernel/nsproxy.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/nsproxy.c
--- linux-2.6.32.17/kernel/nsproxy.c	2009-09-10 15:26:28.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/nsproxy.c	2009-12-03 20:04:56.000000000 +0100
@@ -19,6 +19,8 @@
 #include <linux/mnt_namespace.h>
 #include <linux/utsname.h>
 #include <linux/pid_namespace.h>
+#include <linux/vserver/global.h>
+#include <linux/vserver/debug.h>
 #include <net/net_namespace.h>
 #include <linux/ipc_namespace.h>
 
@@ -31,8 +33,11 @@ static inline struct nsproxy *create_nsp
 	struct nsproxy *nsproxy;
 
 	nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
-	if (nsproxy)
+	if (nsproxy) {
 		atomic_set(&nsproxy->count, 1);
+		atomic_inc(&vs_global_nsproxy);
+	}
+	vxdprintk(VXD_CBIT(space, 2), "create_nsproxy = %p[1]", nsproxy);
 	return nsproxy;
 }
 
@@ -41,41 +46,52 @@ static inline struct nsproxy *create_nsp
  * Return the newly created nsproxy.  Do not attach this to the task,
  * leave it to the caller to do proper locking and attach it to task.
  */
-static struct nsproxy *create_new_namespaces(unsigned long flags,
-			struct task_struct *tsk, struct fs_struct *new_fs)
+static struct nsproxy *unshare_namespaces(unsigned long flags,
+			struct nsproxy *orig, struct fs_struct *new_fs)
 {
 	struct nsproxy *new_nsp;
 	int err;
 
+	vxdprintk(VXD_CBIT(space, 4),
+		"unshare_namespaces(0x%08lx,%p,%p)",
+		flags, orig, new_fs);
+
 	new_nsp = create_nsproxy();
 	if (!new_nsp)
 		return ERR_PTR(-ENOMEM);
 
-	new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs);
+	new_nsp->mnt_ns = copy_mnt_ns(flags, orig->mnt_ns, new_fs);
 	if (IS_ERR(new_nsp->mnt_ns)) {
 		err = PTR_ERR(new_nsp->mnt_ns);
 		goto out_ns;
 	}
 
-	new_nsp->uts_ns = copy_utsname(flags, tsk->nsproxy->uts_ns);
+	new_nsp->uts_ns = copy_utsname(flags, orig->uts_ns);
 	if (IS_ERR(new_nsp->uts_ns)) {
 		err = PTR_ERR(new_nsp->uts_ns);
 		goto out_uts;
 	}
 
-	new_nsp->ipc_ns = copy_ipcs(flags, tsk->nsproxy->ipc_ns);
+	new_nsp->ipc_ns = copy_ipcs(flags, orig->ipc_ns);
 	if (IS_ERR(new_nsp->ipc_ns)) {
 		err = PTR_ERR(new_nsp->ipc_ns);
 		goto out_ipc;
 	}
 
-	new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
+	new_nsp->pid_ns = copy_pid_ns(flags, orig->pid_ns);
 	if (IS_ERR(new_nsp->pid_ns)) {
 		err = PTR_ERR(new_nsp->pid_ns);
 		goto out_pid;
 	}
 
-	new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
+	/* disabled now?
+	new_nsp->user_ns = copy_user_ns(flags, orig->user_ns);
+	if (IS_ERR(new_nsp->user_ns)) {
+		err = PTR_ERR(new_nsp->user_ns);
+		goto out_user;
+	} */
+
+	new_nsp->net_ns = copy_net_ns(flags, orig->net_ns);
 	if (IS_ERR(new_nsp->net_ns)) {
 		err = PTR_ERR(new_nsp->net_ns);
 		goto out_net;
@@ -100,6 +116,38 @@ out_ns:
 	return ERR_PTR(err);
 }
 
+static struct nsproxy *create_new_namespaces(int flags, struct task_struct *tsk,
+			struct fs_struct *new_fs)
+{
+	return unshare_namespaces(flags, tsk->nsproxy, new_fs);
+}
+
+/*
+ * copies the nsproxy, setting refcount to 1, and grabbing a
+ * reference to all contained namespaces.
+ */
+struct nsproxy *copy_nsproxy(struct nsproxy *orig)
+{
+	struct nsproxy *ns = create_nsproxy();
+
+	if (ns) {
+		memcpy(ns, orig, sizeof(struct nsproxy));
+		atomic_set(&ns->count, 1);
+
+		if (ns->mnt_ns)
+			get_mnt_ns(ns->mnt_ns);
+		if (ns->uts_ns)
+			get_uts_ns(ns->uts_ns);
+		if (ns->ipc_ns)
+			get_ipc_ns(ns->ipc_ns);
+		if (ns->pid_ns)
+			get_pid_ns(ns->pid_ns);
+		if (ns->net_ns)
+			get_net(ns->net_ns);
+	}
+	return ns;
+}
+
 /*
  * called from clone.  This now handles copy for nsproxy and all
  * namespaces therein.
@@ -107,9 +155,12 @@ out_ns:
 int copy_namespaces(unsigned long flags, struct task_struct *tsk)
 {
 	struct nsproxy *old_ns = tsk->nsproxy;
-	struct nsproxy *new_ns;
+	struct nsproxy *new_ns = NULL;
 	int err = 0;
 
+	vxdprintk(VXD_CBIT(space, 7), "copy_namespaces(0x%08lx,%p[%p])",
+		flags, tsk, old_ns);
+
 	if (!old_ns)
 		return 0;
 
@@ -119,7 +170,7 @@ int copy_namespaces(unsigned long flags,
 				CLONE_NEWPID | CLONE_NEWNET)))
 		return 0;
 
-	if (!capable(CAP_SYS_ADMIN)) {
+	if (!vx_can_unshare(CAP_SYS_ADMIN, flags)) {
 		err = -EPERM;
 		goto out;
 	}
@@ -146,6 +197,9 @@ int copy_namespaces(unsigned long flags,
 
 out:
 	put_nsproxy(old_ns);
+	vxdprintk(VXD_CBIT(space, 3),
+		"copy_namespaces(0x%08lx,%p[%p]) = %d [%p]",
+		flags, tsk, old_ns, err, new_ns);
 	return err;
 }
 
@@ -159,7 +213,9 @@ void free_nsproxy(struct nsproxy *ns)
 		put_ipc_ns(ns->ipc_ns);
 	if (ns->pid_ns)
 		put_pid_ns(ns->pid_ns);
-	put_net(ns->net_ns);
+	if (ns->net_ns)
+		put_net(ns->net_ns);
+	atomic_dec(&vs_global_nsproxy);
 	kmem_cache_free(nsproxy_cachep, ns);
 }
 
@@ -172,11 +228,15 @@ int unshare_nsproxy_namespaces(unsigned 
 {
 	int err = 0;
 
+	vxdprintk(VXD_CBIT(space, 4),
+		"unshare_nsproxy_namespaces(0x%08lx,[%p])",
+		unshare_flags, current->nsproxy);
+
 	if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
 			       CLONE_NEWNET)))
 		return 0;
 
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_can_unshare(CAP_SYS_ADMIN, unshare_flags))
 		return -EPERM;
 
 	*new_nsp = create_new_namespaces(unshare_flags, current,
diff -NurpP --minimal linux-2.6.32.17/kernel/pid.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/pid.c
--- linux-2.6.32.17/kernel/pid.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/pid.c	2009-12-03 20:04:56.000000000 +0100
@@ -36,6 +36,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
+#include <linux/vs_pid.h>
 
 #define pid_hashfn(nr, ns)	\
 	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -305,7 +306,7 @@ EXPORT_SYMBOL_GPL(find_pid_ns);
 
 struct pid *find_vpid(int nr)
 {
-	return find_pid_ns(nr, current->nsproxy->pid_ns);
+	return find_pid_ns(vx_rmap_pid(nr), current->nsproxy->pid_ns);
 }
 EXPORT_SYMBOL_GPL(find_vpid);
 
@@ -365,6 +366,9 @@ void transfer_pid(struct task_struct *ol
 struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 {
 	struct task_struct *result = NULL;
+
+	if (type == PIDTYPE_REALPID)
+		type = PIDTYPE_PID;
 	if (pid) {
 		struct hlist_node *first;
 		first = rcu_dereference(pid->tasks[type].first);
@@ -380,7 +384,7 @@ EXPORT_SYMBOL(pid_task);
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
-	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
+	return pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID);
 }
 
 struct task_struct *find_task_by_vpid(pid_t vnr)
@@ -422,7 +426,7 @@ struct pid *find_get_pid(pid_t nr)
 }
 EXPORT_SYMBOL_GPL(find_get_pid);
 
-pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns)
 {
 	struct upid *upid;
 	pid_t nr = 0;
@@ -435,6 +439,11 @@ pid_t pid_nr_ns(struct pid *pid, struct 
 	return nr;
 }
 
+pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+{
+	return vx_map_pid(pid_unmapped_nr_ns(pid, ns));
+}
+
 pid_t pid_vnr(struct pid *pid)
 {
 	return pid_nr_ns(pid, current->nsproxy->pid_ns);
diff -NurpP --minimal linux-2.6.32.17/kernel/pid_namespace.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/pid_namespace.c
--- linux-2.6.32.17/kernel/pid_namespace.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/pid_namespace.c	2009-12-03 20:04:56.000000000 +0100
@@ -13,6 +13,7 @@
 #include <linux/syscalls.h>
 #include <linux/err.h>
 #include <linux/acct.h>
+#include <linux/vserver/global.h>
 
 #define BITS_PER_PAGE		(PAGE_SIZE*8)
 
@@ -86,6 +87,7 @@ static struct pid_namespace *create_pid_
 		goto out_free_map;
 
 	kref_init(&ns->kref);
+	atomic_inc(&vs_global_pid_ns);
 	ns->level = level;
 	ns->parent = get_pid_ns(parent_pid_ns);
 
@@ -111,6 +113,7 @@ static void destroy_pid_namespace(struct
 
 	for (i = 0; i < PIDMAP_ENTRIES; i++)
 		kfree(ns->pidmap[i].page);
+	atomic_dec(&vs_global_pid_ns);
 	kmem_cache_free(pid_ns_cachep, ns);
 }
 
diff -NurpP --minimal linux-2.6.32.17/kernel/posix-timers.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/posix-timers.c
--- linux-2.6.32.17/kernel/posix-timers.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/posix-timers.c	2010-07-08 02:22:31.000000000 +0200
@@ -46,6 +46,7 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/module.h>
+#include <linux/vs_context.h>
 
 /*
  * Management arrays for POSIX timers.	 Timers are kept in slab memory
@@ -363,6 +364,7 @@ int posix_timer_event(struct k_itimer *t
 {
 	struct task_struct *task;
 	int shared, ret = -1;
+
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -379,10 +381,18 @@ int posix_timer_event(struct k_itimer *t
 	rcu_read_lock();
 	task = pid_task(timr->it_pid, PIDTYPE_PID);
 	if (task) {
+		struct vx_info_save vxis;
+		struct vx_info *vxi;
+
+		vxi = get_vx_info(task->vx_info);
+		enter_vx_info(vxi, &vxis);
 		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
 		ret = send_sigqueue(timr->sigq, task, shared);
+		leave_vx_info(&vxis);
+		put_vx_info(vxi);
 	}
 	rcu_read_unlock();
+
 	/* If we failed to send the signal the timer stops. */
 	return ret > 0;
 }
diff -NurpP --minimal linux-2.6.32.17/kernel/printk.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/printk.c
--- linux-2.6.32.17/kernel/printk.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/printk.c	2009-12-03 20:04:56.000000000 +0100
@@ -33,6 +33,7 @@
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
+#include <linux/vs_cvirt.h>
 
 #include <asm/uaccess.h>
 
@@ -276,18 +277,13 @@ int do_syslog(int type, char __user *buf
 	unsigned i, j, limit, count;
 	int do_clear = 0;
 	char c;
-	int error = 0;
+	int error;
 
 	error = security_syslog(type);
 	if (error)
 		return error;
 
-	switch (type) {
-	case 0:		/* Close log */
-		break;
-	case 1:		/* Open log */
-		break;
-	case 2:		/* Read from log */
+	if ((type >= 2) && (type <= 4)) {
 		error = -EINVAL;
 		if (!buf || len < 0)
 			goto out;
@@ -298,6 +294,16 @@ int do_syslog(int type, char __user *buf
 			error = -EFAULT;
 			goto out;
 		}
+	}
+	if (!vx_check(0, VS_ADMIN|VS_WATCH))
+		return vx_do_syslog(type, buf, len);
+
+	switch (type) {
+	case 0:		/* Close log */
+		break;
+	case 1:		/* Open log */
+		break;
+	case 2:		/* Read from log */
 		error = wait_event_interruptible(log_wait,
 							(log_start - log_end));
 		if (error)
@@ -322,16 +328,6 @@ int do_syslog(int type, char __user *buf
 		do_clear = 1;
 		/* FALL THRU */
 	case 3:		/* Read last kernel messages */
-		error = -EINVAL;
-		if (!buf || len < 0)
-			goto out;
-		error = 0;
-		if (!len)
-			goto out;
-		if (!access_ok(VERIFY_WRITE, buf, len)) {
-			error = -EFAULT;
-			goto out;
-		}
 		count = len;
 		if (count > log_buf_len)
 			count = log_buf_len;
diff -NurpP --minimal linux-2.6.32.17/kernel/ptrace.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/ptrace.c
--- linux-2.6.32.17/kernel/ptrace.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/ptrace.c	2009-12-03 20:04:56.000000000 +0100
@@ -22,6 +22,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/vs_context.h>
 
 
 /*
@@ -151,6 +152,11 @@ int __ptrace_may_access(struct task_stru
 		dumpable = get_dumpable(task->mm);
 	if (!dumpable && !capable(CAP_SYS_PTRACE))
 		return -EPERM;
+	if (!vx_check(task->xid, VS_ADMIN_P|VS_IDENT))
+		return -EPERM;
+	if (!vx_check(task->xid, VS_IDENT) &&
+		!task_vx_flags(task, VXF_STATE_ADMIN, 0))
+		return -EACCES;
 
 	return security_ptrace_access_check(task, mode);
 }
@@ -621,6 +627,10 @@ SYSCALL_DEFINE4(ptrace, long, request, l
 		goto out;
 	}
 
+	ret = -EPERM;
+	if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+		goto out_put_task_struct;
+
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
 		/*
diff -NurpP --minimal linux-2.6.32.17/kernel/sched.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched.c
--- linux-2.6.32.17/kernel/sched.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched.c	2010-08-09 18:06:32.000000000 +0200
@@ -71,6 +71,8 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <linux/vs_sched.h>
+#include <linux/vs_cvirt.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -237,6 +239,15 @@ static DEFINE_MUTEX(sched_domains_mutex)
 
 #include <linux/cgroup.h>
 
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
+struct cfs_bandwidth {
+	spinlock_t		cfs_runtime_lock;
+	ktime_t			cfs_period;
+	u64			cfs_runtime;
+	struct hrtimer		cfs_period_timer;
+};
+#endif
+
 struct cfs_rq;
 
 static LIST_HEAD(task_groups);
@@ -257,6 +268,9 @@ struct task_group {
 	/* runqueue "owned" by this group on each cpu */
 	struct cfs_rq **cfs_rq;
 	unsigned long shares;
+#ifdef CONFIG_CFS_HARD_LIMITS
+	struct cfs_bandwidth cfs_bandwidth;
+#endif
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -446,6 +460,19 @@ struct cfs_rq {
 	unsigned long rq_weight;
 #endif
 #endif
+#ifdef CONFIG_CFS_HARD_LIMITS
+	/* set when the group is throttled  on this cpu */
+	int cfs_throttled;
+
+	/* runtime currently consumed by the group on this rq */
+	u64 cfs_time;
+
+	/* runtime available to the group on this rq */
+	u64 cfs_runtime;
+
+	/* Protects the cfs runtime related fields of this cfs_rq */
+	spinlock_t cfs_runtime_lock;
+#endif
 };
 
 /* Real-Time classes' related field in a runqueue: */
@@ -1616,6 +1643,7 @@ static void update_group_shares_cpu(stru
 	}
 }
 
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
 /*
  * Re-compute the task group their per cpu shares over the given domain.
  * This needs to be done in a bottom-up fashion because the rq weight of a
@@ -1643,8 +1671,10 @@ static int tg_shares_up(struct task_grou
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
 		 * run here it will not get delayed by group starvation.
+		 * Also if the group is throttled on this cpu, pretend that
+		 * it has no tasks.
 		 */
-		if (!weight)
+		if (!weight || cfs_rq_throttled(tg->cfs_rq[i]))
 			weight = NICE_0_LOAD;
 
 		rq_weight += weight;
@@ -1818,6 +1848,175 @@ static void cfs_rq_set_shares(struct cfs
 static void calc_load_account_active(struct rq *this_rq);
 static void update_sysctl(void);
 
+
+#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED)
+
+#ifdef CONFIG_SMP
+static inline const struct cpumask *sched_bw_period_mask(void)
+{
+	return cpu_rq(smp_processor_id())->rd->span;
+}
+#else /* !CONFIG_SMP */
+static inline const struct cpumask *sched_bw_period_mask(void)
+{
+	return cpu_online_mask;
+}
+#endif /* CONFIG_SMP */
+
+#else
+static inline const struct cpumask *sched_bw_period_mask(void)
+{
+	return cpu_online_mask;
+}
+
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_CFS_HARD_LIMITS
+
+/*
+ * Runtime allowed for a cfs group before it is hard limited.
+ * default: Infinite which means no hard limiting.
+ */
+u64 sched_cfs_runtime = RUNTIME_INF;
+
+/*
+ * period over which we hard limit the cfs group's bandwidth.
+ * default: 0.5s
+ */
+u64 sched_cfs_period = 500000;
+
+static inline u64 global_cfs_period(void)
+{
+	return sched_cfs_period * NSEC_PER_USEC;
+}
+
+static inline u64 global_cfs_runtime(void)
+{
+	return RUNTIME_INF;
+}
+
+void do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b);
+
+static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq)
+{
+	spin_lock(&cfs_rq->cfs_runtime_lock);
+}
+
+static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq)
+{
+	spin_unlock(&cfs_rq->cfs_runtime_lock);
+}
+
+/*
+ * Refresh the runtimes of the throttled groups.
+ * But nothing much to do now, will populate this in later patches.
+ */
+static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
+{
+	struct cfs_bandwidth *cfs_b =
+		container_of(timer, struct cfs_bandwidth, cfs_period_timer);
+
+	do_sched_cfs_period_timer(cfs_b);
+	hrtimer_add_expires_ns(timer, ktime_to_ns(cfs_b->cfs_period));
+	return HRTIMER_RESTART;
+}
+
+/*
+ * TODO: Check if this kind of timer setup is sufficient for cfs or
+ * should we do what rt is doing.
+ */
+static void start_cfs_bandwidth(struct task_group *tg)
+{
+	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+
+	/*
+	 * Timer isn't setup for groups with infinite runtime
+	 */
+	if (cfs_b->cfs_runtime == RUNTIME_INF)
+		return;
+
+	if (hrtimer_active(&cfs_b->cfs_period_timer))
+		return;
+
+	hrtimer_start_range_ns(&cfs_b->cfs_period_timer, cfs_b->cfs_period,
+			0, HRTIMER_MODE_REL);
+}
+
+static void init_cfs_bandwidth(struct task_group *tg)
+{
+	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+
+	cfs_b->cfs_period = ns_to_ktime(global_cfs_period());
+	cfs_b->cfs_runtime = global_cfs_runtime();
+
+	spin_lock_init(&cfs_b->cfs_runtime_lock);
+
+	hrtimer_init(&cfs_b->cfs_period_timer,
+			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	cfs_b->cfs_period_timer.function = &sched_cfs_period_timer;
+}
+
+static inline void destroy_cfs_bandwidth(struct task_group *tg)
+{
+	hrtimer_cancel(&tg->cfs_bandwidth.cfs_period_timer);
+}
+
+static void init_cfs_hard_limits(struct cfs_rq *cfs_rq, struct task_group *tg)
+{
+	cfs_rq->cfs_time = 0;
+	cfs_rq->cfs_throttled = 0;
+	cfs_rq->cfs_runtime = tg->cfs_bandwidth.cfs_runtime;
+	spin_lock_init(&cfs_rq->cfs_runtime_lock);
+}
+
+#else /* !CONFIG_CFS_HARD_LIMITS */
+
+static void init_cfs_bandwidth(struct task_group *tg)
+{
+	return;
+}
+
+static inline void destroy_cfs_bandwidth(struct task_group *tg)
+{
+	return;
+}
+
+static void init_cfs_hard_limits(struct cfs_rq *cfs_rq, struct task_group *tg)
+{
+	return;
+}
+
+static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq)
+{
+	return;
+}
+
+static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq)
+{
+	return;
+}
+
+#endif /* CONFIG_CFS_HARD_LIMITS */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
+
+static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq)
+{
+	return;
+}
+
+static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq)
+{
+	return;
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
+{
+	return 0;
+}
+
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -2981,9 +3180,17 @@ EXPORT_SYMBOL(avenrun);
  */
 void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
 {
-	loads[0] = (avenrun[0] + offset) << shift;
-	loads[1] = (avenrun[1] + offset) << shift;
-	loads[2] = (avenrun[2] + offset) << shift;
+	if (vx_flags(VXF_VIRT_LOAD, 0)) {
+		struct vx_info *vxi = current_vx_info();
+
+		loads[0] = (vxi->cvirt.load[0] + offset) << shift;
+		loads[1] = (vxi->cvirt.load[1] + offset) << shift;
+		loads[2] = (vxi->cvirt.load[2] + offset) << shift;
+	} else {
+		loads[0] = (avenrun[0] + offset) << shift;
+		loads[1] = (avenrun[1] + offset) << shift;
+		loads[2] = (avenrun[2] + offset) << shift;
+	}
 }
 
 static unsigned long
@@ -5046,16 +5253,19 @@ void account_user_time(struct task_struc
 		       cputime_t cputime_scaled)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
 	cputime64_t tmp;
+	int nice = (TASK_NICE(p) > 0);
 
 	/* Add user time to process. */
 	p->utime = cputime_add(p->utime, cputime);
 	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+	vx_account_user(vxi, cputime, nice);
 	account_group_user_time(p, cputime);
 
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
-	if (TASK_NICE(p) > 0)
+	if (nice)
 		cpustat->nice = cputime64_add(cpustat->nice, tmp);
 	else
 		cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -5101,6 +5311,7 @@ void account_system_time(struct task_str
 			 cputime_t cputime, cputime_t cputime_scaled)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
 	cputime64_t tmp;
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
@@ -5111,6 +5322,7 @@ void account_system_time(struct task_str
 	/* Add system time to process. */
 	p->stime = cputime_add(p->stime, cputime);
 	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+	vx_account_system(vxi, cputime, 0 /* do we have idle time? */);
 	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
@@ -6147,7 +6359,7 @@ SYSCALL_DEFINE1(nice, int, increment)
 		nice = 19;
 
 	if (increment < 0 && !can_nice(current, nice))
-		return -EPERM;
+		return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
 
 	retval = security_task_setnice(current, nice);
 	if (retval)
@@ -9217,6 +9429,32 @@ static int update_sched_domains(struct n
 }
 #endif
 
+#ifdef CONFIG_SMP
+static void disable_runtime(struct rq *rq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
+	disable_runtime_cfs(rq);
+#endif
+	disable_runtime_rt(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static void enable_runtime(struct rq *rq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
+	enable_runtime_cfs(rq);
+#endif
+	enable_runtime_rt(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+#endif
+
 static int update_runtime(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
@@ -9349,6 +9587,7 @@ static void init_tg_cfs_entry(struct tas
 	struct rq *rq = cpu_rq(cpu);
 	tg->cfs_rq[cpu] = cfs_rq;
 	init_cfs_rq(cfs_rq, rq);
+	init_cfs_hard_limits(cfs_rq, tg);
 	cfs_rq->tg = tg;
 	if (add)
 		list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
@@ -9478,6 +9717,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	init_cfs_bandwidth(&init_task_group);
+#endif
+
 #ifdef CONFIG_GROUP_SCHED
 	list_add(&init_task_group.list, &task_groups);
 	INIT_LIST_HEAD(&init_task_group.children);
@@ -9504,6 +9747,7 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
+		init_cfs_hard_limits(&rq->cfs, &init_task_group);
 		init_task_group.shares = init_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 #ifdef CONFIG_CGROUP_SCHED
@@ -9783,6 +10027,7 @@ static void free_fair_sched_group(struct
 {
 	int i;
 
+	destroy_cfs_bandwidth(tg);
 	for_each_possible_cpu(i) {
 		if (tg->cfs_rq)
 			kfree(tg->cfs_rq[i]);
@@ -9809,6 +10054,7 @@ int alloc_fair_sched_group(struct task_g
 	if (!tg->se)
 		goto err;
 
+	init_cfs_bandwidth(tg);
 	tg->shares = NICE_0_LOAD;
 
 	for_each_possible_cpu(i) {
@@ -10532,6 +10778,100 @@ static u64 cpu_shares_read_u64(struct cg
 
 	return (u64) tg->shares;
 }
+
+#ifdef CONFIG_CFS_HARD_LIMITS
+
+static int tg_set_cfs_bandwidth(struct task_group *tg,
+		u64 cfs_period, u64 cfs_runtime)
+{
+	int i;
+
+	spin_lock_irq(&tg->cfs_bandwidth.cfs_runtime_lock);
+	tg->cfs_bandwidth.cfs_period = ns_to_ktime(cfs_period);
+	tg->cfs_bandwidth.cfs_runtime = cfs_runtime;
+
+	for_each_possible_cpu(i) {
+		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
+
+		cfs_rq_runtime_lock(cfs_rq);
+		cfs_rq->cfs_runtime = cfs_runtime;
+		cfs_rq_runtime_unlock(cfs_rq);
+	}
+
+	start_cfs_bandwidth(tg);
+	spin_unlock_irq(&tg->cfs_bandwidth.cfs_runtime_lock);
+	return 0;
+}
+
+int tg_set_cfs_runtime(struct task_group *tg, long cfs_runtime_us)
+{
+	u64 cfs_runtime, cfs_period;
+
+	cfs_period = ktime_to_ns(tg->cfs_bandwidth.cfs_period);
+	cfs_runtime = (u64)cfs_runtime_us * NSEC_PER_USEC;
+	if (cfs_runtime_us < 0)
+		cfs_runtime = RUNTIME_INF;
+
+	return tg_set_cfs_bandwidth(tg, cfs_period, cfs_runtime);
+}
+
+long tg_get_cfs_runtime(struct task_group *tg)
+{
+	u64 cfs_runtime_us;
+
+	if (tg->cfs_bandwidth.cfs_runtime == RUNTIME_INF)
+		return -1;
+
+	cfs_runtime_us = tg->cfs_bandwidth.cfs_runtime;
+	do_div(cfs_runtime_us, NSEC_PER_USEC);
+	return cfs_runtime_us;
+}
+
+int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
+{
+	u64 cfs_runtime, cfs_period;
+
+	cfs_period = (u64)cfs_period_us * NSEC_PER_USEC;
+	cfs_runtime = tg->cfs_bandwidth.cfs_runtime;
+
+	if (cfs_period == 0)
+		return -EINVAL;
+
+	return tg_set_cfs_bandwidth(tg, cfs_period, cfs_runtime);
+}
+
+long tg_get_cfs_period(struct task_group *tg)
+{
+	u64 cfs_period_us;
+
+	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.cfs_period);
+	do_div(cfs_period_us, NSEC_PER_USEC);
+	return cfs_period_us;
+}
+
+static s64 cpu_cfs_runtime_read_s64(struct cgroup *cgrp, struct cftype *cft)
+{
+	return tg_get_cfs_runtime(cgroup_tg(cgrp));
+}
+
+static int cpu_cfs_runtime_write_s64(struct cgroup *cgrp, struct cftype *cftype,
+				s64 cfs_runtime_us)
+{
+	return tg_set_cfs_runtime(cgroup_tg(cgrp), cfs_runtime_us);
+}
+
+static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+{
+	return tg_get_cfs_period(cgroup_tg(cgrp));
+}
+
+static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
+				u64 cfs_period_us)
+{
+	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+}
+
+#endif /* CONFIG_CFS_HARD_LIMITS */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -10565,6 +10905,18 @@ static struct cftype cpu_files[] = {
 		.read_u64 = cpu_shares_read_u64,
 		.write_u64 = cpu_shares_write_u64,
 	},
+#ifdef CONFIG_CFS_HARD_LIMITS
+	{
+		.name = "cfs_runtime_us",
+		.read_s64 = cpu_cfs_runtime_read_s64,
+		.write_s64 = cpu_cfs_runtime_write_s64,
+	},
+	{
+		.name = "cfs_period_us",
+		.read_u64 = cpu_cfs_period_read_u64,
+		.write_u64 = cpu_cfs_period_write_u64,
+	},
+#endif /* CONFIG_CFS_HARD_LIMITS */
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	{
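
A minimal sketch (not part of the patch) of the unit handling behind the
cpu.cfs_runtime_us and cpu.cfs_period_us control files added above: user
space writes microseconds, the scheduler stores nanoseconds, and a runtime
of -1 is taken to mean RUNTIME_INF (no hard limit), mirroring
tg_set_cfs_runtime()/tg_get_cfs_runtime().

	#include <stdint.h>

	#define RUNTIME_INF	(~0ULL)
	#define NSEC_PER_USEC	1000ULL

	/* microseconds from user space -> nanoseconds kept in cfs_bandwidth */
	static uint64_t cfs_runtime_us_to_ns(int64_t us)
	{
		if (us < 0)
			return RUNTIME_INF;	/* -1 => no hard limit */
		return (uint64_t)us * NSEC_PER_USEC;
	}

	/* nanoseconds kept in cfs_bandwidth -> microseconds reported to user space */
	static int64_t cfs_runtime_ns_to_us(uint64_t ns)
	{
		if (ns == RUNTIME_INF)
			return -1;
		return (int64_t)(ns / NSEC_PER_USEC);
	}

For example, writing 250000 to cpu.cfs_runtime_us allows the group roughly
250 ms of CPU time per cfs_period_us interval before it is throttled.
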
diff -NurpP --minimal linux-2.6.32.17/kernel/sched_debug.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched_debug.c
--- linux-2.6.32.17/kernel/sched_debug.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched_debug.c	2009-12-29 00:36:26.000000000 +0100
@@ -80,6 +80,11 @@ static void print_cfs_group_stats(struct
 	PN(se->wait_max);
 	PN(se->wait_sum);
 	P(se->wait_count);
+#ifdef CONFIG_CFS_HARD_LIMITS
+	PN(se->throttle_max);
+	PN(se->throttle_sum);
+	P(se->throttle_count);
+#endif
 #endif
 	P(se->load.weight);
 #undef PN
@@ -214,6 +219,16 @@ void print_cfs_rq(struct seq_file *m, in
 #ifdef CONFIG_SMP
 	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
 #endif
+#ifdef CONFIG_CFS_HARD_LIMITS
+	spin_lock_irqsave(&rq->lock, flags);
+	SEQ_printf(m, "  .%-30s: %d\n", "cfs_throttled",
+			cfs_rq->cfs_throttled);
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "cfs_time",
+			SPLIT_NS(cfs_rq->cfs_time));
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "cfs_runtime",
+			SPLIT_NS(cfs_rq->cfs_runtime));
+	spin_unlock_irqrestore(&rq->lock, flags);
+#endif /* CONFIG_CFS_HARD_LIMITS */
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }
@@ -314,7 +329,7 @@ static int sched_debug_show(struct seq_f
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
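
The throttle_max/throttle_sum/throttle_count schedstats added above are
printed through the usual PN()/P() helpers in /proc/sched_debug. As a small
illustrative sketch (assuming, as for the other PN() fields, that the summed
value is in nanoseconds), the average time an entity spent throttled per
throttling episode can be derived as:

	/* sketch: average throttle duration in milliseconds */
	static double avg_throttle_ms(unsigned long long throttle_sum_ns,
				      unsigned long long throttle_count)
	{
		if (!throttle_count)
			return 0.0;
		return (double)throttle_sum_ns / (double)throttle_count / 1e6;
	}
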
diff -NurpP --minimal linux-2.6.32.17/kernel/sched_fair.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched_fair.c
--- linux-2.6.32.17/kernel/sched_fair.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched_fair.c	2010-02-12 10:59:55.000000000 +0100
@@ -192,7 +192,308 @@ find_matching_se(struct sched_entity **s
 	}
 }
 
-#else	/* !CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_CFS_HARD_LIMITS
+
+static inline void update_stats_throttle_start(struct cfs_rq *cfs_rq,
+			struct sched_entity *se)
+{
+	schedstat_set(se->throttle_start, rq_of(cfs_rq)->clock);
+}
+
+static inline void update_stats_throttle_end(struct cfs_rq *cfs_rq,
+			struct sched_entity *se)
+{
+	schedstat_set(se->throttle_max, max(se->throttle_max,
+			rq_of(cfs_rq)->clock - se->throttle_start));
+	schedstat_set(se->throttle_count, se->throttle_count + 1);
+	schedstat_set(se->throttle_sum, se->throttle_sum +
+			rq_of(cfs_rq)->clock - se->throttle_start);
+	schedstat_set(se->throttle_start, 0);
+}
+
+static inline
+struct cfs_rq *sched_cfs_period_cfs_rq(struct cfs_bandwidth *cfs_b, int cpu)
+{
+	return container_of(cfs_b, struct task_group,
+			cfs_bandwidth)->cfs_rq[cpu];
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->cfs_throttled;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Ensure this RQ takes back all the runtime it lent to its neighbours.
+ */
+static void disable_runtime_cfs(struct rq *rq)
+{
+	struct root_domain *rd = rq->rd;
+	struct cfs_rq *cfs_rq;
+
+	if (unlikely(!scheduler_running))
+		return;
+
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+		s64 want;
+		int i;
+
+		spin_lock(&cfs_b->cfs_runtime_lock);
+		spin_lock(&cfs_rq->cfs_runtime_lock);
+
+		/*
+		 * Either we're all infinite and nobody needs to borrow,
+		 * or we're already disabled and thus have nothing to do, or
+		 * we have exactly the right amount of runtime to take out.
+		 */
+		if (cfs_rq->cfs_runtime == RUNTIME_INF ||
+				cfs_rq->cfs_runtime == cfs_b->cfs_runtime)
+			goto balanced;
+		spin_unlock(&cfs_rq->cfs_runtime_lock);
+
+		/*
+		 * Calculate the difference between what we started out with
+		 * and what we currently have, that's the amount of runtime
+		 * we lent and now have to reclaim.
+		 */
+		want = cfs_b->cfs_runtime - cfs_rq->cfs_runtime;
+
+		/*
+		 * Greedy reclaim, take back as much as possible.
+		 */
+		for_each_cpu(i, rd->span) {
+			struct cfs_rq *iter = sched_cfs_period_cfs_rq(cfs_b, i);
+			s64 diff;
+
+			/*
+			 * Can't reclaim from ourselves or disabled runqueues.
+			 */
+			if (iter == cfs_rq || iter->cfs_runtime == RUNTIME_INF)
+				continue;
+
+			spin_lock(&iter->cfs_runtime_lock);
+			if (want > 0) {
+				diff = min_t(s64, iter->cfs_runtime, want);
+				iter->cfs_runtime -= diff;
+				want -= diff;
+			} else {
+				iter->cfs_runtime -= want;
+				want -= want;
+			}
+
+			spin_unlock(&iter->cfs_runtime_lock);
+			if (!want)
+				break;
+		}
+
+		spin_lock(&cfs_rq->cfs_runtime_lock);
+		/*
+		 * We cannot be left wanting - that would mean some
+		 * runtime leaked out of the system.
+		 */
+		BUG_ON(want);
+balanced:
+		/*
+		 * Disable all the borrow logic by pretending we have infinite
+		 * runtime - in which case borrowing doesn't make sense.
+		 */
+		cfs_rq->cfs_runtime = RUNTIME_INF;
+		spin_unlock(&cfs_rq->cfs_runtime_lock);
+		spin_unlock(&cfs_b->cfs_runtime_lock);
+	}
+}
+
+static void enable_runtime_cfs(struct rq *rq)
+{
+	struct cfs_rq *cfs_rq;
+
+	if (unlikely(!scheduler_running))
+		return;
+
+	/*
+	 * Reset each runqueue's bandwidth settings
+	 */
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+
+		spin_lock(&cfs_b->cfs_runtime_lock);
+		spin_lock(&cfs_rq->cfs_runtime_lock);
+		cfs_rq->cfs_runtime = cfs_b->cfs_runtime;
+		cfs_rq->cfs_time = 0;
+		cfs_rq->cfs_throttled = 0;
+		spin_unlock(&cfs_rq->cfs_runtime_lock);
+		spin_unlock(&cfs_b->cfs_runtime_lock);
+	}
+}
+
+/*
+ * Ran out of runtime; check whether we can borrow some from others
+ * instead of getting throttled right away.
+ */
+static void do_cfs_balance_runtime(struct cfs_rq *cfs_rq)
+{
+	struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+	const struct cpumask *span = sched_bw_period_mask();
+	int i, weight;
+	u64 cfs_period;
+
+	weight = cpumask_weight(span);
+	spin_lock(&cfs_b->cfs_runtime_lock);
+	cfs_period = ktime_to_ns(cfs_b->cfs_period);
+
+	for_each_cpu(i, span) {
+		struct cfs_rq *borrow_cfs_rq =
+				sched_cfs_period_cfs_rq(cfs_b, i);
+		s64 diff;
+
+		if (borrow_cfs_rq == cfs_rq)
+			continue;
+
+		cfs_rq_runtime_lock(borrow_cfs_rq);
+		if (borrow_cfs_rq->cfs_runtime == RUNTIME_INF) {
+			cfs_rq_runtime_unlock(borrow_cfs_rq);
+			continue;
+		}
+
+		diff = borrow_cfs_rq->cfs_runtime - borrow_cfs_rq->cfs_time;
+		if (diff > 0) {
+			diff = div_u64((u64)diff, weight);
+			if (cfs_rq->cfs_runtime + diff > cfs_period)
+				diff = cfs_period - cfs_rq->cfs_runtime;
+			borrow_cfs_rq->cfs_runtime -= diff;
+			cfs_rq->cfs_runtime += diff;
+			if (cfs_rq->cfs_runtime == cfs_period) {
+				cfs_rq_runtime_unlock(borrow_cfs_rq);
+				break;
+			}
+		}
+		cfs_rq_runtime_unlock(borrow_cfs_rq);
+	}
+	spin_unlock(&cfs_b->cfs_runtime_lock);
+}
+
+/*
+ * Called with the cfs_rq's runtime lock held.
+ */
+static void cfs_balance_runtime(struct cfs_rq *cfs_rq)
+{
+	cfs_rq_runtime_unlock(cfs_rq);
+	do_cfs_balance_runtime(cfs_rq);
+	cfs_rq_runtime_lock(cfs_rq);
+}
+
+#else /* !CONFIG_SMP */
+
+static void cfs_balance_runtime(struct cfs_rq *cfs_rq)
+{
+	return;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Check if the group entity exceeded its runtime. If so, mark the cfs_rq as
+ * throttled and mark the current task for rescheduling.
+ */
+static void sched_cfs_runtime_exceeded(struct sched_entity *se,
+	struct task_struct *tsk_curr, unsigned long delta_exec)
+{
+	struct cfs_rq *cfs_rq;
+
+	cfs_rq = group_cfs_rq(se);
+
+	if (cfs_rq->cfs_runtime == RUNTIME_INF)
+		return;
+
+	cfs_rq->cfs_time += delta_exec;
+
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	if (cfs_rq->cfs_time > cfs_rq->cfs_runtime)
+		cfs_balance_runtime(cfs_rq);
+
+	if (cfs_rq->cfs_time > cfs_rq->cfs_runtime) {
+		cfs_rq->cfs_throttled = 1;
+		update_stats_throttle_start(cfs_rq, se);
+		resched_task(tsk_curr);
+	}
+}
+
+static inline void update_curr_group(struct sched_entity *curr,
+		unsigned long delta_exec, struct task_struct *tsk_curr)
+{
+	sched_cfs_runtime_exceeded(curr, tsk_curr, delta_exec);
+}
+
+static void enqueue_entity_locked(struct cfs_rq *cfs_rq,
+		struct sched_entity *se, int wakeup);
+
+static void enqueue_throttled_entity(struct rq *rq, struct sched_entity *se)
+{
+	for_each_sched_entity(se) {
+		struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+
+		if (se->on_rq || cfs_rq_throttled(gcfs_rq) ||
+				!gcfs_rq->nr_running)
+			break;
+		enqueue_entity_locked(cfs_rq_of(se), se, 0);
+	}
+}
+
+/*
+ * Refresh runtimes of all cfs_rqs in this group, i.e.,
+ * refresh runtimes of the representative cfs_rq of this
+ * tg on all cpus. Enqueue any throttled entity back.
+ */
+void do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b)
+{
+	int i;
+	const struct cpumask *span = sched_bw_period_mask();
+	unsigned long flags;
+
+	for_each_cpu(i, span) {
+		struct rq *rq = cpu_rq(i);
+		struct cfs_rq *cfs_rq = sched_cfs_period_cfs_rq(cfs_b, i);
+		struct sched_entity *se = cfs_rq->tg->se[i];
+
+		spin_lock_irqsave(&rq->lock, flags);
+		cfs_rq_runtime_lock(cfs_rq);
+		cfs_rq->cfs_time = 0;
+		if (cfs_rq_throttled(cfs_rq)) {
+			update_rq_clock(rq);
+			update_stats_throttle_end(cfs_rq, se);
+			cfs_rq->cfs_throttled = 0;
+			enqueue_throttled_entity(rq, se);
+		}
+		cfs_rq_runtime_unlock(cfs_rq);
+		spin_unlock_irqrestore(&rq->lock, flags);
+	}
+}
+
+#else
+
+static inline void update_curr_group(struct sched_entity *curr,
+		unsigned long delta_exec, struct task_struct *tsk_curr)
+{
+	return;
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
+{
+	return 0;
+}
+
+#endif /* CONFIG_CFS_HARD_LIMITS */
+
+#else	/* CONFIG_FAIR_GROUP_SCHED */
+
+static inline void update_curr_group(struct sched_entity *curr,
+		unsigned long delta_exec, struct task_struct *tsk_curr)
+{
+	return;
+}
 
 static inline struct task_struct *task_of(struct sched_entity *se)
 {
@@ -254,7 +555,6 @@ find_matching_se(struct sched_entity **s
 
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
-
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
  */
@@ -492,14 +792,25 @@ __update_curr(struct cfs_rq *cfs_rq, str
 	update_min_vruntime(cfs_rq);
 }
 
-static void update_curr(struct cfs_rq *cfs_rq)
+static void update_curr_task(struct sched_entity *curr,
+		unsigned long delta_exec)
+{
+	struct task_struct *curtask = task_of(curr);
+
+	trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
+	cpuacct_charge(curtask, delta_exec);
+	account_group_exec_runtime(curtask, delta_exec);
+}
+
+static int update_curr_common(struct cfs_rq *cfs_rq, unsigned long *delta)
 {
 	struct sched_entity *curr = cfs_rq->curr;
-	u64 now = rq_of(cfs_rq)->clock;
+	struct rq *rq = rq_of(cfs_rq);
+	u64 now = rq->clock;
 	unsigned long delta_exec;
 
 	if (unlikely(!curr))
-		return;
+		return 1;
 
 	/*
 	 * Get the amount of time the current task was running
@@ -508,17 +819,29 @@ static void update_curr(struct cfs_rq *c
 	 */
 	delta_exec = (unsigned long)(now - curr->exec_start);
 	if (!delta_exec)
-		return;
+		return 1;
 
 	__update_curr(cfs_rq, curr, delta_exec);
 	curr->exec_start = now;
+	*delta = delta_exec;
+	return 0;
+}
 
-	if (entity_is_task(curr)) {
-		struct task_struct *curtask = task_of(curr);
+static void update_curr(struct cfs_rq *cfs_rq)
+{
+	struct sched_entity *curr = cfs_rq->curr;
+	struct rq *rq = rq_of(cfs_rq);
+	unsigned long delta_exec;
 
-		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
-		cpuacct_charge(curtask, delta_exec);
-		account_group_exec_runtime(curtask, delta_exec);
+	if (update_curr_common(cfs_rq, &delta_exec))
+		return;
+
+	if (entity_is_task(curr))
+		update_curr_task(curr, delta_exec);
+	else {
+		cfs_rq_runtime_lock(group_cfs_rq(curr));
+		update_curr_group(curr, delta_exec, rq->curr);
+		cfs_rq_runtime_unlock(group_cfs_rq(curr));
 	}
 }
 
@@ -743,13 +1066,9 @@ place_entity(struct cfs_rq *cfs_rq, stru
 	se->vruntime = vruntime;
 }
 
-static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+static void enqueue_entity_common(struct cfs_rq *cfs_rq,
+		struct sched_entity *se, int wakeup)
 {
-	/*
-	 * Update run-time statistics of the 'current'.
-	 */
-	update_curr(cfs_rq);
 	account_entity_enqueue(cfs_rq, se);
 
 	if (wakeup) {
@@ -761,6 +1080,29 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
 	check_spread(cfs_rq, se);
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
+
+	if (entity_is_task(se))
+		vx_activate_task(task_of(se));
+}
+
+static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+		int wakeup)
+{
+	/*
+	 * Update run-time statistics of the 'current'.
+	 */
+	update_curr(cfs_rq);
+	enqueue_entity_common(cfs_rq, se, wakeup);
+}
+
+static void enqueue_entity_locked(struct cfs_rq *cfs_rq,
+		struct sched_entity *se, int wakeup)
+{
+	/*
+	 * Update run-time statistics of the 'current'.
+	 */
+	// update_curr_locked(cfs_rq);
+	enqueue_entity_common(cfs_rq, se, wakeup);
 }
 
 static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -804,6 +1146,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
 
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
+	if (entity_is_task(se))
+		vx_deactivate_task(task_of(se));
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 }
@@ -900,6 +1244,32 @@ static struct sched_entity *pick_next_en
 	return se;
 }
 
+/*
+ * Called from put_prev_entity()
+ * If a group entity (@se) is found to be throttled, it will not be put back
+ * on @cfs_rq, which is equivalent to dequeuing it.
+ */
+static int dequeue_throttled_entity(struct cfs_rq *cfs_rq,
+		struct sched_entity *se)
+{
+	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+
+	if (entity_is_task(se))
+		return 0;
+
+	cfs_rq_runtime_lock(gcfs_rq);
+	if (!cfs_rq_throttled(gcfs_rq) && gcfs_rq->nr_running) {
+		cfs_rq_runtime_unlock(gcfs_rq);
+		return 0;
+	}
+
+	__clear_buddies(cfs_rq, se);
+	account_entity_dequeue(cfs_rq, se);
+	cfs_rq->curr = NULL;
+	cfs_rq_runtime_unlock(gcfs_rq);
+	return 1;
+}
+
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
 	/*
@@ -911,6 +1281,8 @@ static void put_prev_entity(struct cfs_r
 
 	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
+		if (dequeue_throttled_entity(cfs_rq, prev))
+			return;
 		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
@@ -1007,10 +1379,28 @@ static inline void hrtick_update(struct 
 }
 #endif
 
+static int enqueue_group_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+		 int wakeup)
+{
+	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+	int ret = 0;
+
+	cfs_rq_runtime_lock(gcfs_rq);
+	if (cfs_rq_throttled(gcfs_rq)) {
+		ret = 1;
+		goto out;
+	}
+	enqueue_entity_locked(cfs_rq, se, wakeup);
+out:
+	cfs_rq_runtime_unlock(gcfs_rq);
+	return ret;
+}
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
  * then put the task into the rbtree:
+ * Don't enqueue a throttled entity further into the hierarchy.
  */
 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
@@ -1020,11 +1410,15 @@ static void enqueue_task_fair(struct rq 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
 			break;
+
 		cfs_rq = cfs_rq_of(se);
-		enqueue_entity(cfs_rq, se, wakeup);
+		if (entity_is_task(se))
+			enqueue_entity(cfs_rq, se, wakeup);
+		else
+			if (enqueue_group_entity(cfs_rq, se, wakeup))
+				break;
 		wakeup = 1;
 	}
-
 	hrtick_update(rq);
 }
 
@@ -1044,6 +1438,17 @@ static void dequeue_task_fair(struct rq 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight)
 			break;
+
+		/*
+		 * If this cfs_rq is throttled, then it is already
+		 * dequeued.
+		 */
+		cfs_rq_runtime_lock(cfs_rq);
+		if (cfs_rq_throttled(cfs_rq)) {
+			cfs_rq_runtime_unlock(cfs_rq);
+			break;
+		}
+		cfs_rq_runtime_unlock(cfs_rq);
 		sleep = 1;
 	}
 
@@ -1821,9 +2226,10 @@ load_balance_fair(struct rq *this_rq, in
 		u64 rem_load, moved_load;
 
 		/*
-		 * empty group
+		 * empty group or throttled group
 		 */
-		if (!busiest_cfs_rq->task_weight)
+		if (!busiest_cfs_rq->task_weight ||
+				cfs_rq_throttled(busiest_cfs_rq))
 			continue;
 
 		rem_load = (u64)rem_load_move * busiest_weight;
@@ -1872,6 +2278,12 @@ move_one_task_fair(struct rq *this_rq, i
 
 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 		/*
+		 * Don't move a task from a throttled cfs_rq
+		 */
+		if (cfs_rq_throttled(busy_cfs_rq))
+			continue;
+
+		/*
 		 * pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators
 		 */
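
A simplified, standalone sketch (hypothetical names, not part of the patch)
of the greedy borrowing arithmetic used by do_cfs_balance_runtime() above:
each lender donates an equal share of its spare runtime, and the borrower is
never topped up beyond one full period. Locking and the per-cpu iteration
are omitted.

	#include <stdint.h>
	#include <stdio.h>

	/* Move up to spare/nr_cpus of runtime from one lender to the borrower,
	 * capping the borrower at a full period. */
	static int64_t borrow_from(int64_t *lender_runtime, int64_t lender_time,
				   int64_t *borrower_runtime, int64_t period,
				   int nr_cpus)
	{
		int64_t spare = *lender_runtime - lender_time;
		int64_t diff;

		if (spare <= 0)
			return 0;

		diff = spare / nr_cpus;
		if (*borrower_runtime + diff > period)
			diff = period - *borrower_runtime;

		*lender_runtime -= diff;
		*borrower_runtime += diff;
		return diff;
	}

	int main(void)
	{
		int64_t lender = 400000000, borrower = 100000000;
		int64_t period = 500000000;		/* 500 ms in ns */

		borrow_from(&lender, 150000000, &borrower, period, 4);
		printf("lender=%lld borrower=%lld\n",
		       (long long)lender, (long long)borrower);
		return 0;
	}
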
diff -NurpP --minimal linux-2.6.32.17/kernel/sched_rt.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched_rt.c
--- linux-2.6.32.17/kernel/sched_rt.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sched_rt.c	2009-12-03 20:04:56.000000000 +0100
@@ -235,18 +235,6 @@ static int rt_se_boosted(struct sched_rt
 	return p->prio != p->normal_prio;
 }
 
-#ifdef CONFIG_SMP
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
-	return cpu_rq(smp_processor_id())->rd->span;
-}
-#else
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
-	return cpu_online_mask;
-}
-#endif
-
 static inline
 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 {
@@ -296,11 +284,6 @@ static inline int rt_rq_throttled(struct
 	return rt_rq->rt_throttled;
 }
 
-static inline const struct cpumask *sched_rt_period_mask(void)
-{
-	return cpu_online_mask;
-}
-
 static inline
 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 {
@@ -373,7 +356,7 @@ next:
 /*
  * Ensure this RQ takes back all the runtime it lend to its neighbours.
  */
-static void __disable_runtime(struct rq *rq)
+static void disable_runtime_rt(struct rq *rq)
 {
 	struct root_domain *rd = rq->rd;
 	struct rt_rq *rt_rq;
@@ -450,16 +433,7 @@ balanced:
 	}
 }
 
-static void disable_runtime(struct rq *rq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rq->lock, flags);
-	__disable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-static void __enable_runtime(struct rq *rq)
+static void enable_runtime_rt(struct rq *rq)
 {
 	struct rt_rq *rt_rq;
 
@@ -482,15 +456,6 @@ static void __enable_runtime(struct rq *
 	}
 }
 
-static void enable_runtime(struct rq *rq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&rq->lock, flags);
-	__enable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static int balance_runtime(struct rt_rq *rt_rq)
 {
 	int more = 0;
@@ -518,7 +483,7 @@ static int do_sched_rt_period_timer(stru
 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return 1;
 
-	span = sched_rt_period_mask();
+	span = sched_bw_period_mask();
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
@@ -1564,7 +1529,7 @@ static void rq_online_rt(struct rq *rq)
 	if (rq->rt.overloaded)
 		rt_set_overload(rq);
 
-	__enable_runtime(rq);
+	enable_runtime_rt(rq);
 
 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 }
@@ -1575,7 +1540,7 @@ static void rq_offline_rt(struct rq *rq)
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
 
-	__disable_runtime(rq);
+	disable_runtime_rt(rq);
 
 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
 }
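
The sched_rt_period_mask() variants removed above are folded into a shared
sched_bw_period_mask() used by both the RT code and the new CFS hard-limit
code. Its definition lives elsewhere in this patch (kernel/sched.c); based
on the deleted code it presumably reduces to:

	#ifdef CONFIG_SMP
	static inline const struct cpumask *sched_bw_period_mask(void)
	{
		return cpu_rq(smp_processor_id())->rd->span;
	}
	#else
	static inline const struct cpumask *sched_bw_period_mask(void)
	{
		return cpu_online_mask;
	}
	#endif
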
diff -NurpP --minimal linux-2.6.32.17/kernel/signal.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/signal.c
--- linux-2.6.32.17/kernel/signal.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/signal.c	2010-07-08 02:22:31.000000000 +0200
@@ -27,6 +27,8 @@
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
+#include <linux/vs_context.h>
+#include <linux/vs_pid.h>
 #include <trace/events/sched.h>
 
 #include <asm/param.h>
@@ -598,6 +600,14 @@ static int check_kill_permission(int sig
 	if (!valid_signal(sig))
 		return -EINVAL;
 
+	if ((info != SEND_SIG_NOINFO) &&
+		(is_si_special(info) || !SI_FROMUSER(info)))
+		goto skip;
+
+	vxdprintk(VXD_CBIT(misc, 7),
+		"check_kill_permission(%d,%p,%p[#%u,%u])",
+		sig, info, t, vx_task_xid(t), t->pid);
+
 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
 		return 0;
 
@@ -627,6 +637,20 @@ static int check_kill_permission(int sig
 		}
 	}
 
+	error = -EPERM;
+	if (t->pid == 1 && current->xid)
+		return error;
+
+	error = -ESRCH;
+	/* FIXME: we shouldn't return ESRCH ever, to avoid
+		  loops, maybe ENOENT or EACCES? */
+	if (!vx_check(vx_task_xid(t), VS_WATCH_P | VS_IDENT)) {
+		vxdprintk(current->xid || VXD_CBIT(misc, 7),
+			"signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u",
+			sig, info, t, vx_task_xid(t), t->pid, current->xid);
+		return error;
+	}
+skip:
 	return security_task_kill(t, info, sig, 0);
 }
 
@@ -1115,7 +1139,7 @@ int kill_pid_info(int sig, struct siginf
 	rcu_read_lock();
 retry:
 	p = pid_task(pid, PIDTYPE_PID);
-	if (p) {
+	if (p && vx_check(vx_task_xid(p), VS_IDENT)) {
 		error = group_send_sig_info(sig, info, p);
 		if (unlikely(error == -ESRCH))
 			/*
@@ -1154,7 +1178,7 @@ int kill_pid_info_as_uid(int sig, struct
 
 	read_lock(&tasklist_lock);
 	p = pid_task(pid, PIDTYPE_PID);
-	if (!p) {
+	if (!p || !vx_check(vx_task_xid(p), VS_IDENT)) {
 		ret = -ESRCH;
 		goto out_unlock;
 	}
@@ -1208,8 +1232,10 @@ static int kill_something_info(int sig, 
 		struct task_struct * p;
 
 		for_each_process(p) {
-			if (task_pid_vnr(p) > 1 &&
-					!same_thread_group(p, current)) {
+			if (vx_check(vx_task_xid(p), VS_ADMIN|VS_IDENT) &&
+				task_pid_vnr(p) > 1 &&
+				!same_thread_group(p, current) &&
+				!vx_current_initpid(p->pid)) {
 				int err = group_send_sig_info(sig, info, p);
 				++count;
 				if (err != -EPERM)
@@ -1874,6 +1900,11 @@ relock:
 				!sig_kernel_only(signr))
 			continue;
 
+		/* virtual init is protected against user signals */
+		if ((info->si_code == SI_USER) &&
+			vx_current_initpid(current->pid))
+			continue;
+
 		if (sig_kernel_stop(signr)) {
 			/*
 			 * The default action is to stop all threads in
diff -NurpP --minimal linux-2.6.32.17/kernel/softirq.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/softirq.c
--- linux-2.6.32.17/kernel/softirq.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/softirq.c	2009-12-03 20:04:56.000000000 +0100
@@ -24,6 +24,7 @@
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
+#include <linux/vs_context.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
diff -NurpP --minimal linux-2.6.32.17/kernel/sys.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sys.c
--- linux-2.6.32.17/kernel/sys.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sys.c	2009-12-03 20:04:56.000000000 +0100
@@ -41,6 +41,7 @@
 #include <linux/syscalls.h>
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
+#include <linux/vs_pid.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -130,7 +131,10 @@ static int set_one_prio(struct task_stru
 		goto out;
 	}
 	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
-		error = -EACCES;
+		if (vx_flags(VXF_IGNEG_NICE, 0))
+			error = 0;
+		else
+			error = -EACCES;
 		goto out;
 	}
 	no_nice = security_task_setnice(p, niceval);
@@ -179,6 +183,8 @@ SYSCALL_DEFINE3(setpriority, int, which,
 			else
 				pgrp = task_pgrp(current);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				error = set_one_prio(p, niceval, error);
 			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 			break;
@@ -240,6 +246,8 @@ SYSCALL_DEFINE2(getpriority, int, which,
 			else
 				pgrp = task_pgrp(current);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				niceval = 20 - task_nice(p);
 				if (niceval > retval)
 					retval = niceval;
@@ -349,6 +357,9 @@ void kernel_power_off(void)
 	machine_power_off();
 }
 EXPORT_SYMBOL_GPL(kernel_power_off);
+
+long vs_reboot(unsigned int, void __user *);
+
 /*
  * Reboot system call: for obvious reasons only root may call it,
  * and even root needs to set up some magic numbers in the registers
@@ -381,6 +392,9 @@ SYSCALL_DEFINE4(reboot, int, magic1, int
 	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
 		cmd = LINUX_REBOOT_CMD_HALT;
 
+	if (!vx_check(0, VS_ADMIN|VS_WATCH))
+		return vs_reboot(cmd, arg);
+
 	lock_kernel();
 	switch (cmd) {
 	case LINUX_REBOOT_CMD_RESTART:
@@ -1133,7 +1147,7 @@ SYSCALL_DEFINE2(sethostname, char __user
 	int errno;
 	char tmp[__NEW_UTS_LEN];
 
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
 		return -EPERM;
 	if (len < 0 || len > __NEW_UTS_LEN)
 		return -EINVAL;
@@ -1182,7 +1196,7 @@ SYSCALL_DEFINE2(setdomainname, char __us
 	int errno;
 	char tmp[__NEW_UTS_LEN];
 
-	if (!capable(CAP_SYS_ADMIN))
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
 		return -EPERM;
 	if (len < 0 || len > __NEW_UTS_LEN)
 		return -EINVAL;
@@ -1251,7 +1265,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int,
 		return -EINVAL;
 	old_rlim = current->signal->rlim + resource;
 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
-	    !capable(CAP_SYS_RESOURCE))
+	    !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
 		return -EPERM;
 	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
 		return -EPERM;
diff -NurpP --minimal linux-2.6.32.17/kernel/sysctl.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sysctl.c
--- linux-2.6.32.17/kernel/sysctl.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sysctl.c	2010-01-20 04:21:33.000000000 +0100
@@ -124,6 +124,7 @@ static int ngroups_max = NGROUPS_MAX;
 extern char modprobe_path[];
 extern int modules_disabled;
 #endif
+extern char vshelper_path[];
 #ifdef CONFIG_CHR_DEV_SG
 extern int sg_big_buff;
 #endif
@@ -593,6 +594,15 @@ static struct ctl_table kern_table[] = {
 		.strategy	= &sysctl_string,
 	},
 #endif
+	{
+		.ctl_name	= KERN_VSHELPER,
+		.procname	= "vshelper",
+		.data		= &vshelper_path,
+		.maxlen		= 256,
+		.mode		= 0644,
+		.proc_handler	= &proc_dostring,
+		.strategy	= &sysctl_string,
+	},
 #ifdef CONFIG_CHR_DEV_SG
 	{
 		.ctl_name	= KERN_SG_BIG_BUFF,
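
With the entry above registered, the configured helper path shows up as
/proc/sys/kernel/vshelper. A minimal user-space sketch (not part of the
patch) that reads the current value:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char path[256] = "";
		FILE *f = fopen("/proc/sys/kernel/vshelper", "r");

		if (f) {
			if (fgets(path, sizeof(path), f))
				path[strcspn(path, "\n")] = '\0';	/* strip newline */
			fclose(f);
		}
		printf("vshelper: %s\n", path);
		return 0;
	}
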
diff -NurpP --minimal linux-2.6.32.17/kernel/sysctl_check.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sysctl_check.c
--- linux-2.6.32.17/kernel/sysctl_check.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/sysctl_check.c	2010-02-12 10:59:55.000000000 +0100
@@ -39,6 +39,7 @@ static const struct trans_ctl_table tran
 
 	{ KERN_PANIC,			"panic" },
 	{ KERN_REALROOTDEV,		"real-root-dev" },
+	{ KERN_VSHELPER,		"vshelper" },
 
 	{ KERN_SPARC_REBOOT,		"reboot-cmd" },
 	{ KERN_CTLALTDEL,		"ctrl-alt-del" },
@@ -1218,6 +1219,22 @@ static const struct trans_ctl_table tran
 	{}
 };
 
+static const struct trans_ctl_table trans_vserver_table[] = {
+	{ 1,	"debug_switch" },
+	{ 2,	"debug_xid" },
+	{ 3,	"debug_nid" },
+	{ 4,	"debug_tag" },
+	{ 5,	"debug_net" },
+	{ 6,	"debug_limit" },
+	{ 7,	"debug_cres" },
+	{ 8,	"debug_dlim" },
+	{ 9,	"debug_quota" },
+	{ 10,	"debug_cvirt" },
+	{ 11,	"debug_space" },
+	{ 12,	"debug_misc" },
+	{}
+};
+
 static const struct trans_ctl_table trans_root_table[] = {
 	{ CTL_KERN,	"kernel",	trans_kern_table },
 	{ CTL_VM,	"vm",		trans_vm_table },
@@ -1234,6 +1251,7 @@ static const struct trans_ctl_table tran
 	{ CTL_SUNRPC,	"sunrpc",	trans_sunrpc_table },
 	{ CTL_PM,	"pm",		trans_pm_table },
 	{ CTL_FRV,	"frv",		trans_frv_table },
+	{ CTL_VSERVER,	"vserver",	trans_vserver_table },
 	{}
 };
 
diff -NurpP --minimal linux-2.6.32.17/kernel/time.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/time.c
--- linux-2.6.32.17/kernel/time.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/time.c	2009-12-03 20:04:56.000000000 +0100
@@ -63,6 +63,7 @@ EXPORT_SYMBOL(sys_tz);
 SYSCALL_DEFINE1(time, time_t __user *, tloc)
 {
 	time_t i = get_seconds();
+/*	FIXME: do_gettimeofday(&tv) -> vx_gettimeofday(&tv) */
 
 	if (tloc) {
 		if (put_user(i,tloc))
@@ -93,7 +94,7 @@ SYSCALL_DEFINE1(stime, time_t __user *, 
 	if (err)
 		return err;
 
-	do_settimeofday(&tv);
+	vx_settimeofday(&tv);
 	return 0;
 }
 
@@ -104,7 +105,7 @@ SYSCALL_DEFINE2(gettimeofday, struct tim
 {
 	if (likely(tv != NULL)) {
 		struct timeval ktv;
-		do_gettimeofday(&ktv);
+		vx_gettimeofday(&ktv);
 		if (copy_to_user(tv, &ktv, sizeof(ktv)))
 			return -EFAULT;
 	}
@@ -179,7 +180,7 @@ int do_sys_settimeofday(struct timespec 
 		/* SMP safe, again the code in arch/foo/time.c should
 		 * globally block out interrupts when it runs.
 		 */
-		return do_settimeofday(tv);
+		return vx_settimeofday(tv);
 	}
 	return 0;
 }
@@ -311,7 +312,7 @@ void getnstimeofday(struct timespec *tv)
 {
 	struct timeval x;
 
-	do_gettimeofday(&x);
+	vx_gettimeofday(&x);
 	tv->tv_sec = x.tv_sec;
 	tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
 }
diff -NurpP --minimal linux-2.6.32.17/kernel/timer.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/timer.c
--- linux-2.6.32.17/kernel/timer.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/timer.c	2009-12-03 20:04:56.000000000 +0100
@@ -39,6 +39,10 @@
 #include <linux/kallsyms.h>
 #include <linux/perf_event.h>
 #include <linux/sched.h>
+#include <linux/vs_base.h>
+#include <linux/vs_cvirt.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/sched.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -1255,12 +1259,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, sec
 
 #endif
 
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
- * should be moved into arch/i386 instead?
- */
 
 /**
  * sys_getpid - return the thread group id of the current process
@@ -1289,10 +1287,23 @@ SYSCALL_DEFINE0(getppid)
 	rcu_read_lock();
 	pid = task_tgid_vnr(current->real_parent);
 	rcu_read_unlock();
+	return vx_map_pid(pid);
+}
 
-	return pid;
+#ifdef __alpha__
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead.
+ */
+
+asmlinkage long do_getxpid(long *ppid)
+{
+	*ppid = sys_getppid();
+	return sys_getpid();
 }
 
+#else /* __alpha__ */
+
 SYSCALL_DEFINE0(getuid)
 {
 	/* Only we change this so SMP safe */
diff -NurpP --minimal linux-2.6.32.17/kernel/user.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/user.c
--- linux-2.6.32.17/kernel/user.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/user.c	2009-12-03 20:04:56.000000000 +0100
@@ -251,10 +251,10 @@ static struct kobj_type uids_ktype = {
  *
  * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
  */
-static int uids_user_create(struct user_struct *up)
+static int uids_user_create(struct user_namespace *ns, struct user_struct *up)
 {
 	struct kobject *kobj = &up->kobj;
-	int error;
+	int error = 0;
 
 	memset(kobj, 0, sizeof(struct kobject));
 	if (up->user_ns != &init_user_ns)
@@ -282,7 +282,7 @@ int __init uids_sysfs_init(void)
 	if (!uids_kset)
 		return -ENOMEM;
 
-	return uids_user_create(&root_user);
+	return uids_user_create(NULL, &root_user);
 }
 
 /* delayed work function to remove sysfs directory for a user and free up
@@ -353,7 +353,8 @@ static struct user_struct *uid_hash_find
 }
 
 int uids_sysfs_init(void) { return 0; }
-static inline int uids_user_create(struct user_struct *up) { return 0; }
+static inline int uids_user_create(struct user_namespace *ns,
+	struct user_struct *up) { return 0; }
 static inline void uids_mutex_lock(void) { }
 static inline void uids_mutex_unlock(void) { }
 
@@ -450,7 +451,7 @@ struct user_struct *alloc_uid(struct use
 
 		new->user_ns = get_user_ns(ns);
 
-		if (uids_user_create(new))
+		if (uids_user_create(ns, new))
 			goto out_destoy_sched;
 
 		/*
diff -NurpP --minimal linux-2.6.32.17/kernel/user_namespace.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/user_namespace.c
--- linux-2.6.32.17/kernel/user_namespace.c	2009-03-24 14:22:45.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/user_namespace.c	2009-12-03 20:04:56.000000000 +0100
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/user_namespace.h>
 #include <linux/cred.h>
+#include <linux/vserver/global.h>
 
 /*
  * Create a new user namespace, deriving the creator from the user in the
@@ -30,6 +31,7 @@ int create_user_ns(struct cred *new)
 		return -ENOMEM;
 
 	kref_init(&ns->kref);
+	atomic_inc(&vs_global_user_ns);
 
 	for (n = 0; n < UIDHASH_SZ; ++n)
 		INIT_HLIST_HEAD(ns->uidhash_table + n);
@@ -78,6 +80,8 @@ void free_user_ns(struct kref *kref)
 	struct user_namespace *ns =
 		container_of(kref, struct user_namespace, kref);
 
+	/* FIXME: maybe move into destroyer? */
+	atomic_dec(&vs_global_user_ns);
 	INIT_WORK(&ns->destroyer, free_user_ns_work);
 	schedule_work(&ns->destroyer);
 }
diff -NurpP --minimal linux-2.6.32.17/kernel/utsname.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/utsname.c
--- linux-2.6.32.17/kernel/utsname.c	2009-09-10 15:26:28.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/utsname.c	2009-12-03 20:04:56.000000000 +0100
@@ -14,14 +14,17 @@
 #include <linux/utsname.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/vserver/global.h>
 
 static struct uts_namespace *create_uts_ns(void)
 {
 	struct uts_namespace *uts_ns;
 
 	uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL);
-	if (uts_ns)
+	if (uts_ns) {
 		kref_init(&uts_ns->kref);
+		atomic_inc(&vs_global_uts_ns);
+	}
 	return uts_ns;
 }
 
@@ -71,5 +74,6 @@ void free_uts_ns(struct kref *kref)
 	struct uts_namespace *ns;
 
 	ns = container_of(kref, struct uts_namespace, kref);
+	atomic_dec(&vs_global_uts_ns);
 	kfree(ns);
 }
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/cacct.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cacct.c
--- linux-2.6.32.17/kernel/vserver/cacct.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cacct.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,42 @@
+/*
+ *  linux/kernel/vserver/cacct.c
+ *
+ *  Virtual Server: Context Accounting
+ *
+ *  Copyright (C) 2006-2007 Herbert Pötzl
+ *
+ *  V0.01  added accounting stats
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/vs_context.h>
+#include <linux/vserver/cacct_cmd.h>
+#include <linux/vserver/cacct_int.h>
+
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+
+int vc_sock_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_sock_stat_v0 vc_data;
+	int j, field;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	field = vc_data.field;
+	if ((field < 0) || (field >= VXA_SOCK_SIZE))
+		return -EINVAL;
+
+	for (j = 0; j < 3; j++) {
+		vc_data.count[j] = vx_sock_count(&vxi->cacct, field, j);
+		vc_data.total[j] = vx_sock_total(&vxi->cacct, field, j);
+	}
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/cacct_init.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cacct_init.h
--- linux-2.6.32.17/kernel/vserver/cacct_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cacct_init.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,25 @@
+
+
+static inline void vx_info_init_cacct(struct _vx_cacct *cacct)
+{
+	int i, j;
+
+
+	for (i = 0; i < VXA_SOCK_SIZE; i++) {
+		for (j = 0; j < 3; j++) {
+			atomic_long_set(&cacct->sock[i][j].count, 0);
+			atomic_long_set(&cacct->sock[i][j].total, 0);
+		}
+	}
+	for (i = 0; i < 8; i++)
+		atomic_set(&cacct->slab[i], 0);
+	for (i = 0; i < 5; i++)
+		for (j = 0; j < 4; j++)
+			atomic_set(&cacct->page[i][j], 0);
+}
+
+static inline void vx_info_exit_cacct(struct _vx_cacct *cacct)
+{
+	return;
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/cacct_proc.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cacct_proc.h
--- linux-2.6.32.17/kernel/vserver/cacct_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cacct_proc.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,53 @@
+#ifndef _VX_CACCT_PROC_H
+#define _VX_CACCT_PROC_H
+
+#include <linux/vserver/cacct_int.h>
+
+
+#define VX_SOCKA_TOP	\
+	"Type\t    recv #/bytes\t\t   send #/bytes\t\t    fail #/bytes\n"
+
+static inline int vx_info_proc_cacct(struct _vx_cacct *cacct, char *buffer)
+{
+	int i, j, length = 0;
+	static char *type[VXA_SOCK_SIZE] = {
+		"UNSPEC", "UNIX", "INET", "INET6", "PACKET", "OTHER"
+	};
+
+	length += sprintf(buffer + length, VX_SOCKA_TOP);
+	for (i = 0; i < VXA_SOCK_SIZE; i++) {
+		length += sprintf(buffer + length, "%s:", type[i]);
+		for (j = 0; j < 3; j++) {
+			length += sprintf(buffer + length,
+				"\t%10lu/%-10lu",
+				vx_sock_count(cacct, i, j),
+				vx_sock_total(cacct, i, j));
+		}
+		buffer[length++] = '\n';
+	}
+
+	length += sprintf(buffer + length, "\n");
+	length += sprintf(buffer + length,
+		"slab:\t %8u %8u %8u %8u\n",
+		atomic_read(&cacct->slab[1]),
+		atomic_read(&cacct->slab[4]),
+		atomic_read(&cacct->slab[0]),
+		atomic_read(&cacct->slab[2]));
+
+	length += sprintf(buffer + length, "\n");
+	for (i = 0; i < 5; i++) {
+		length += sprintf(buffer + length,
+			"page[%d]: %8u %8u %8u %8u\t %8u %8u %8u %8u\n", i,
+			atomic_read(&cacct->page[i][0]),
+			atomic_read(&cacct->page[i][1]),
+			atomic_read(&cacct->page[i][2]),
+			atomic_read(&cacct->page[i][3]),
+			atomic_read(&cacct->page[i][4]),
+			atomic_read(&cacct->page[i][5]),
+			atomic_read(&cacct->page[i][6]),
+			atomic_read(&cacct->page[i][7]));
+	}
+	return length;
+}
+
+#endif	/* _VX_CACCT_PROC_H */
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/context.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/context.c
--- linux-2.6.32.17/kernel/vserver/context.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/context.c	2010-03-13 21:49:35.000000000 +0100
@@ -0,0 +1,1058 @@
+/*
+ *  linux/kernel/vserver/context.c
+ *
+ *  Virtual Server: Context Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  context helper
+ *  V0.02  vx_ctx_kill syscall command
+ *  V0.03  replaced context_info calls
+ *  V0.04  redesign of struct (de)alloc
+ *  V0.05  rlimit basic implementation
+ *  V0.06  task_xid and info commands
+ *  V0.07  context flags and caps
+ *  V0.08  switch to RCU based hash
+ *  V0.09  revert to non RCU for now
+ *  V0.10  and back to working RCU hash
+ *  V0.11  and back to locking again
+ *  V0.12  referenced context store
+ *  V0.13  separate per cpu data
+ *  V0.14  changed vcmds to vxi arg
+ *  V0.15  added context stat
+ *  V0.16  have __create claim() the vxi
+ *  V0.17  removed older and legacy stuff
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/security.h>
+#include <linux/pid_namespace.h>
+
+#include <linux/vserver/context.h>
+#include <linux/vserver/network.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/limit.h>
+#include <linux/vserver/limit_int.h>
+#include <linux/vserver/space.h>
+#include <linux/init_task.h>
+#include <linux/fs_struct.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/context_cmd.h>
+
+#include "cvirt_init.h"
+#include "cacct_init.h"
+#include "limit_init.h"
+#include "sched_init.h"
+
+
+atomic_t vx_global_ctotal	= ATOMIC_INIT(0);
+atomic_t vx_global_cactive	= ATOMIC_INIT(0);
+
+
+/*	now inactive context structures */
+
+static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT;
+
+static spinlock_t vx_info_inactive_lock = SPIN_LOCK_UNLOCKED;
+
+
+/*	__alloc_vx_info()
+
+	* allocate an initialized vx_info struct
+	* doesn't make it visible (hash)			*/
+
+static struct vx_info *__alloc_vx_info(xid_t xid)
+{
+	struct vx_info *new = NULL;
+	int cpu, index;
+
+	vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
+
+	/* would this benefit from a slab cache? */
+	new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
+	if (!new)
+		return 0;
+
+	memset(new, 0, sizeof(struct vx_info));
+#ifdef CONFIG_SMP
+	new->ptr_pc = alloc_percpu(struct _vx_info_pc);
+	if (!new->ptr_pc)
+		goto error;
+#endif
+	new->vx_id = xid;
+	INIT_HLIST_NODE(&new->vx_hlist);
+	atomic_set(&new->vx_usecnt, 0);
+	atomic_set(&new->vx_tasks, 0);
+	new->vx_parent = NULL;
+	new->vx_state = 0;
+	init_waitqueue_head(&new->vx_wait);
+
+	/* prepare reaper */
+	get_task_struct(init_pid_ns.child_reaper);
+	new->vx_reaper = init_pid_ns.child_reaper;
+	new->vx_badness_bias = 0;
+
+	/* rest of init goes here */
+	vx_info_init_limit(&new->limit);
+	vx_info_init_sched(&new->sched);
+	vx_info_init_cvirt(&new->cvirt);
+	vx_info_init_cacct(&new->cacct);
+
+	/* per cpu data structures */
+	for_each_possible_cpu(cpu) {
+		vx_info_init_sched_pc(
+			&vx_per_cpu(new, sched_pc, cpu), cpu);
+		vx_info_init_cvirt_pc(
+			&vx_per_cpu(new, cvirt_pc, cpu), cpu);
+	}
+
+	new->vx_flags = VXF_INIT_SET;
+	cap_set_init_eff(new->vx_bcaps);
+	new->vx_ccaps = 0;
+	new->vx_umask = 0;
+
+	new->reboot_cmd = 0;
+	new->exit_code = 0;
+
+	// preconfig fs entries
+	for (index = 0; index < VX_SPACES; index++) {
+		write_lock(&init_fs.lock);
+		init_fs.users++;
+		write_unlock(&init_fs.lock);
+		new->vx_fs[index] = &init_fs;
+	}
+
+	vxdprintk(VXD_CBIT(xid, 0),
+		"alloc_vx_info(%d) = %p", xid, new);
+	vxh_alloc_vx_info(new);
+	atomic_inc(&vx_global_ctotal);
+	return new;
+#ifdef CONFIG_SMP
+error:
+	kfree(new);
+	return 0;
+#endif
+}
+
+/*	__dealloc_vx_info()
+
+	* final disposal of vx_info				*/
+
+static void __dealloc_vx_info(struct vx_info *vxi)
+{
+#ifdef	CONFIG_VSERVER_WARN
+	struct vx_info_save vxis;
+	int cpu;
+#endif
+	vxdprintk(VXD_CBIT(xid, 0),
+		"dealloc_vx_info(%p)", vxi);
+	vxh_dealloc_vx_info(vxi);
+
+#ifdef	CONFIG_VSERVER_WARN
+	enter_vx_info(vxi, &vxis);
+	vx_info_exit_limit(&vxi->limit);
+	vx_info_exit_sched(&vxi->sched);
+	vx_info_exit_cvirt(&vxi->cvirt);
+	vx_info_exit_cacct(&vxi->cacct);
+
+	for_each_possible_cpu(cpu) {
+		vx_info_exit_sched_pc(
+			&vx_per_cpu(vxi, sched_pc, cpu), cpu);
+		vx_info_exit_cvirt_pc(
+			&vx_per_cpu(vxi, cvirt_pc, cpu), cpu);
+	}
+	leave_vx_info(&vxis);
+#endif
+
+	vxi->vx_id = -1;
+	vxi->vx_state |= VXS_RELEASED;
+
+#ifdef CONFIG_SMP
+	free_percpu(vxi->ptr_pc);
+#endif
+	kfree(vxi);
+	atomic_dec(&vx_global_ctotal);
+}
+
+static void __shutdown_vx_info(struct vx_info *vxi)
+{
+	struct nsproxy *nsproxy;
+	struct fs_struct *fs;
+	int index, kill;
+
+	might_sleep();
+
+	vxi->vx_state |= VXS_SHUTDOWN;
+	vs_state_change(vxi, VSC_SHUTDOWN);
+
+	for (index = 0; index < VX_SPACES; index++) {
+		nsproxy = xchg(&vxi->vx_nsproxy[index], NULL);
+		if (nsproxy)
+			put_nsproxy(nsproxy);
+
+		fs = xchg(&vxi->vx_fs[index], NULL);
+		write_lock(&fs->lock);
+		kill = !--fs->users;
+		write_unlock(&fs->lock);
+		if (kill)
+			free_fs_struct(fs);
+	}
+}
+
+/* exported stuff */
+
+void free_vx_info(struct vx_info *vxi)
+{
+	unsigned long flags;
+	unsigned index;
+
+	/* check for reference counts first */
+	BUG_ON(atomic_read(&vxi->vx_usecnt));
+	BUG_ON(atomic_read(&vxi->vx_tasks));
+
+	/* context must not be hashed */
+	BUG_ON(vx_info_state(vxi, VXS_HASHED));
+
+	/* context shutdown is mandatory */
+	BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN));
+
+	/* nsproxy and fs check */
+	for (index = 0; index < VX_SPACES; index++) {
+		BUG_ON(vxi->vx_nsproxy[index]);
+		BUG_ON(vxi->vx_fs[index]);
+	}
+
+	spin_lock_irqsave(&vx_info_inactive_lock, flags);
+	hlist_del(&vxi->vx_hlist);
+	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+
+	__dealloc_vx_info(vxi);
+}
+
+
+/*	hash table for vx_info hash */
+
+#define VX_HASH_SIZE	13
+
+static struct hlist_head vx_info_hash[VX_HASH_SIZE] =
+	{ [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT };
+
+static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;
+
+
+static inline unsigned int __hashval(xid_t xid)
+{
+	return (xid % VX_HASH_SIZE);
+}
+
+
+
+/*	__hash_vx_info()
+
+	* add the vxi to the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __hash_vx_info(struct vx_info *vxi)
+{
+	struct hlist_head *head;
+
+	vxd_assert_lock(&vx_info_hash_lock);
+	vxdprintk(VXD_CBIT(xid, 4),
+		"__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
+	vxh_hash_vx_info(vxi);
+
+	/* context must not be hashed */
+	BUG_ON(vx_info_state(vxi, VXS_HASHED));
+
+	vxi->vx_state |= VXS_HASHED;
+	head = &vx_info_hash[__hashval(vxi->vx_id)];
+	hlist_add_head(&vxi->vx_hlist, head);
+	atomic_inc(&vx_global_cactive);
+}
+
+/*	__unhash_vx_info()
+
+	* remove the vxi from the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __unhash_vx_info(struct vx_info *vxi)
+{
+	unsigned long flags;
+
+	vxd_assert_lock(&vx_info_hash_lock);
+	vxdprintk(VXD_CBIT(xid, 4),
+		"__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id,
+		atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks));
+	vxh_unhash_vx_info(vxi);
+
+	/* context must be hashed */
+	BUG_ON(!vx_info_state(vxi, VXS_HASHED));
+	/* but without tasks */
+	BUG_ON(atomic_read(&vxi->vx_tasks));
+
+	vxi->vx_state &= ~VXS_HASHED;
+	hlist_del_init(&vxi->vx_hlist);
+	spin_lock_irqsave(&vx_info_inactive_lock, flags);
+	hlist_add_head(&vxi->vx_hlist, &vx_info_inactive);
+	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+	atomic_dec(&vx_global_cactive);
+}
+
+
+/*	__lookup_vx_info()
+
+	* requires the hash_lock to be held
+	* doesn't increment the vx_refcnt			*/
+
+static inline struct vx_info *__lookup_vx_info(xid_t xid)
+{
+	struct hlist_head *head = &vx_info_hash[__hashval(xid)];
+	struct hlist_node *pos;
+	struct vx_info *vxi;
+
+	vxd_assert_lock(&vx_info_hash_lock);
+	hlist_for_each(pos, head) {
+		vxi = hlist_entry(pos, struct vx_info, vx_hlist);
+
+		if (vxi->vx_id == xid)
+			goto found;
+	}
+	vxi = NULL;
+found:
+	vxdprintk(VXD_CBIT(xid, 0),
+		"__lookup_vx_info(#%u): %p[#%u]",
+		xid, vxi, vxi ? vxi->vx_id : 0);
+	vxh_lookup_vx_info(vxi, xid);
+	return vxi;
+}
+
+
+/*	__create_vx_info()
+
+	* create the requested context
+	* get(), claim() and hash it				*/
+
+static struct vx_info *__create_vx_info(int id)
+{
+	struct vx_info *new, *vxi = NULL;
+
+	vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);
+
+	if (!(new = __alloc_vx_info(id)))
+		return ERR_PTR(-ENOMEM);
+
+	/* required to make dynamic xids unique */
+	spin_lock(&vx_info_hash_lock);
+
+	/* static context requested */
+	if ((vxi = __lookup_vx_info(id))) {
+		vxdprintk(VXD_CBIT(xid, 0),
+			"create_vx_info(%d) = %p (already there)", id, vxi);
+		if (vx_info_flags(vxi, VXF_STATE_SETUP, 0))
+			vxi = ERR_PTR(-EBUSY);
+		else
+			vxi = ERR_PTR(-EEXIST);
+		goto out_unlock;
+	}
+	/* new context */
+	vxdprintk(VXD_CBIT(xid, 0),
+		"create_vx_info(%d) = %p (new)", id, new);
+	claim_vx_info(new, NULL);
+	__hash_vx_info(get_vx_info(new));
+	vxi = new, new = NULL;
+
+out_unlock:
+	spin_unlock(&vx_info_hash_lock);
+	vxh_create_vx_info(IS_ERR(vxi) ? NULL : vxi, id);
+	if (new)
+		__dealloc_vx_info(new);
+	return vxi;
+}
+
+
+/*	exported stuff						*/
+
+
+void unhash_vx_info(struct vx_info *vxi)
+{
+	__shutdown_vx_info(vxi);
+	spin_lock(&vx_info_hash_lock);
+	__unhash_vx_info(vxi);
+	spin_unlock(&vx_info_hash_lock);
+	__wakeup_vx_info(vxi);
+}
+
+
+/*	lookup_vx_info()
+
+	* search for a vx_info and get() it
+	* negative id means current				*/
+
+struct vx_info *lookup_vx_info(int id)
+{
+	struct vx_info *vxi = NULL;
+
+	if (id < 0) {
+		vxi = get_vx_info(current_vx_info());
+	} else if (id > 1) {
+		spin_lock(&vx_info_hash_lock);
+		vxi = get_vx_info(__lookup_vx_info(id));
+		spin_unlock(&vx_info_hash_lock);
+	}
+	return vxi;
+}
+
+/*	xid_is_hashed()
+
+	* verify that xid is still hashed			*/
+
+int xid_is_hashed(xid_t xid)
+{
+	int hashed;
+
+	spin_lock(&vx_info_hash_lock);
+	hashed = (__lookup_vx_info(xid) != NULL);
+	spin_unlock(&vx_info_hash_lock);
+	return hashed;
+}
+
+#ifdef	CONFIG_PROC_FS
+
+/*	get_xid_list()
+
+	* get a subset of hashed xids for proc
+	* assumes size is at least one				*/
+
+int get_xid_list(int index, unsigned int *xids, int size)
+{
+	int hindex, nr_xids = 0;
+
+	/* only show current and children */
+	if (!vx_check(0, VS_ADMIN | VS_WATCH)) {
+		if (index > 0)
+			return 0;
+		xids[nr_xids] = vx_current_xid();
+		return 1;
+	}
+
+	for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
+		struct hlist_head *head = &vx_info_hash[hindex];
+		struct hlist_node *pos;
+
+		spin_lock(&vx_info_hash_lock);
+		hlist_for_each(pos, head) {
+			struct vx_info *vxi;
+
+			if (--index > 0)
+				continue;
+
+			vxi = hlist_entry(pos, struct vx_info, vx_hlist);
+			xids[nr_xids] = vxi->vx_id;
+			if (++nr_xids >= size) {
+				spin_unlock(&vx_info_hash_lock);
+				goto out;
+			}
+		}
+		/* keep the lock time short */
+		spin_unlock(&vx_info_hash_lock);
+	}
+out:
+	return nr_xids;
+}
+#endif
+
+#ifdef	CONFIG_VSERVER_DEBUG
+
+void	dump_vx_info_inactive(int level)
+{
+	struct hlist_node *entry, *next;
+
+	hlist_for_each_safe(entry, next, &vx_info_inactive) {
+		struct vx_info *vxi =
+			list_entry(entry, struct vx_info, vx_hlist);
+
+		dump_vx_info(vxi, level);
+	}
+}
+
+#endif
+
+#if 0
+int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
+{
+	struct user_struct *new_user, *old_user;
+
+	if (!p || !vxi)
+		BUG();
+
+	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
+		return -EACCES;
+
+	new_user = alloc_uid(vxi->vx_id, p->uid);
+	if (!new_user)
+		return -ENOMEM;
+
+	old_user = p->user;
+	if (new_user != old_user) {
+		atomic_inc(&new_user->processes);
+		atomic_dec(&old_user->processes);
+		p->user = new_user;
+	}
+	free_uid(old_user);
+	return 0;
+}
+#endif
+
+#if 0
+void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p)
+{
+	// p->cap_effective &= vxi->vx_cap_bset;
+	p->cap_effective =
+		cap_intersect(p->cap_effective, vxi->cap_bset);
+	// p->cap_inheritable &= vxi->vx_cap_bset;
+	p->cap_inheritable =
+		cap_intersect(p->cap_inheritable, vxi->cap_bset);
+	// p->cap_permitted &= vxi->vx_cap_bset;
+	p->cap_permitted =
+		cap_intersect(p->cap_permitted, vxi->cap_bset);
+}
+#endif
+
+
+#include <linux/file.h>
+#include <linux/fdtable.h>
+
+static int vx_openfd_task(struct task_struct *tsk)
+{
+	struct files_struct *files = tsk->files;
+	struct fdtable *fdt;
+	const unsigned long *bptr;
+	int count, total;
+
+	/* no rcu_read_lock() because of spin_lock() */
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+	bptr = fdt->open_fds->fds_bits;
+	count = fdt->max_fds / (sizeof(unsigned long) * 8);
+	for (total = 0; count > 0; count--) {
+		if (*bptr)
+			total += hweight_long(*bptr);
+		bptr++;
+	}
+	spin_unlock(&files->file_lock);
+	return total;
+}
+
+
+/*	for *space compatibility */
+
+asmlinkage long sys_unshare(unsigned long);
+
+/*
+ *	migrate task to new context
+ *	gets vxi, puts old_vxi on change
+ *	optionally unshares namespaces (hack)
+ */
+
+int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare)
+{
+	struct vx_info *old_vxi;
+	int ret = 0;
+
+	if (!p || !vxi)
+		BUG();
+
+	vxdprintk(VXD_CBIT(xid, 5),
+		"vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
+		vxi->vx_id, atomic_read(&vxi->vx_usecnt));
+
+	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) &&
+		!vx_info_flags(vxi, VXF_STATE_SETUP, 0))
+		return -EACCES;
+
+	if (vx_info_state(vxi, VXS_SHUTDOWN))
+		return -EFAULT;
+
+	old_vxi = task_get_vx_info(p);
+	if (old_vxi == vxi)
+		goto out;
+
+//	if (!(ret = vx_migrate_user(p, vxi))) {
+	{
+		int openfd;
+
+		task_lock(p);
+		openfd = vx_openfd_task(p);
+
+		if (old_vxi) {
+			atomic_dec(&old_vxi->cvirt.nr_threads);
+			atomic_dec(&old_vxi->cvirt.nr_running);
+			__rlim_dec(&old_vxi->limit, RLIMIT_NPROC);
+			/* FIXME: what about the struct files here? */
+			__rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd);
+			/* account for the executable */
+			__rlim_dec(&old_vxi->limit, VLIMIT_DENTRY);
+		}
+		atomic_inc(&vxi->cvirt.nr_threads);
+		atomic_inc(&vxi->cvirt.nr_running);
+		__rlim_inc(&vxi->limit, RLIMIT_NPROC);
+		/* FIXME: what about the struct files here? */
+		__rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd);
+		/* account for the executable */
+		__rlim_inc(&vxi->limit, VLIMIT_DENTRY);
+
+		if (old_vxi) {
+			release_vx_info(old_vxi, p);
+			clr_vx_info(&p->vx_info);
+		}
+		claim_vx_info(vxi, p);
+		set_vx_info(&p->vx_info, vxi);
+		p->xid = vxi->vx_id;
+
+		vxdprintk(VXD_CBIT(xid, 5),
+			"moved task %p into vxi:%p[#%d]",
+			p, vxi, vxi->vx_id);
+
+		// vx_mask_cap_bset(vxi, p);
+		task_unlock(p);
+
+		/* hack for *spaces to provide compatibility */
+		if (unshare) {
+			struct nsproxy *old_nsp, *new_nsp;
+
+			ret = unshare_nsproxy_namespaces(
+				CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER,
+				&new_nsp, NULL);
+			if (ret)
+				goto out;
+
+			old_nsp = xchg(&p->nsproxy, new_nsp);
+			vx_set_space(vxi,
+				CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, 0);
+			put_nsproxy(old_nsp);
+		}
+	}
+out:
+	put_vx_info(old_vxi);
+	return ret;
+}
+
+int vx_set_reaper(struct vx_info *vxi, struct task_struct *p)
+{
+	struct task_struct *old_reaper;
+
+	if (!vxi)
+		return -EINVAL;
+
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_set_reaper(%p[#%d],%p[#%d,%d])",
+		vxi, vxi->vx_id, p, p->xid, p->pid);
+
+	old_reaper = vxi->vx_reaper;
+	if (old_reaper == p)
+		return 0;
+
+	/* set new child reaper */
+	get_task_struct(p);
+	vxi->vx_reaper = p;
+	put_task_struct(old_reaper);
+	return 0;
+}
+
+int vx_set_init(struct vx_info *vxi, struct task_struct *p)
+{
+	if (!vxi)
+		return -EINVAL;
+
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_set_init(%p[#%d],%p[#%d,%d,%d])",
+		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
+
+	vxi->vx_flags &= ~VXF_STATE_INIT;
+	// vxi->vx_initpid = p->tgid;
+	vxi->vx_initpid = p->pid;
+	return 0;
+}
+
+void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code)
+{
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_exit_init(%p[#%d],%p[#%d,%d,%d])",
+		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
+
+	vxi->exit_code = code;
+	vxi->vx_initpid = 0;
+}
+
+
+void vx_set_persistent(struct vx_info *vxi)
+{
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_set_persistent(%p[#%d])", vxi, vxi->vx_id);
+
+	get_vx_info(vxi);
+	claim_vx_info(vxi, NULL);
+}
+
+void vx_clear_persistent(struct vx_info *vxi)
+{
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id);
+
+	release_vx_info(vxi, NULL);
+	put_vx_info(vxi);
+}
+
+void vx_update_persistent(struct vx_info *vxi)
+{
+	if (vx_info_flags(vxi, VXF_PERSISTENT, 0))
+		vx_set_persistent(vxi);
+	else
+		vx_clear_persistent(vxi);
+}
+
+
+/*	task must be current or locked		*/
+
+void	exit_vx_info(struct task_struct *p, int code)
+{
+	struct vx_info *vxi = p->vx_info;
+
+	if (vxi) {
+		atomic_dec(&vxi->cvirt.nr_threads);
+		vx_nproc_dec(p);
+
+		vxi->exit_code = code;
+		release_vx_info(vxi, p);
+	}
+}
+
+void	exit_vx_info_early(struct task_struct *p, int code)
+{
+	struct vx_info *vxi = p->vx_info;
+
+	if (vxi) {
+		if (vxi->vx_initpid == p->pid)
+			vx_exit_init(vxi, p, code);
+		if (vxi->vx_reaper == p)
+			vx_set_reaper(vxi, init_pid_ns.child_reaper);
+	}
+}
+
+
+/* vserver syscall commands below here */
+
+/*	task xid and vx_info functions */
+
+#include <asm/uaccess.h>
+
+
+int vc_task_xid(uint32_t id)
+{
+	xid_t xid;
+
+	if (id) {
+		struct task_struct *tsk;
+
+		read_lock(&tasklist_lock);
+		tsk = find_task_by_real_pid(id);
+		xid = (tsk) ? tsk->xid : -ESRCH;
+		read_unlock(&tasklist_lock);
+	} else
+		xid = vx_current_xid();
+	return xid;
+}
+
+
+int vc_vx_info(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_vx_info_v0 vc_data;
+
+	vc_data.xid = vxi->vx_id;
+	vc_data.initpid = vxi->vx_initpid;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+int vc_ctx_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_stat_v0 vc_data;
+
+	vc_data.usecnt = atomic_read(&vxi->vx_usecnt);
+	vc_data.tasks = atomic_read(&vxi->vx_tasks);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+/* context functions */
+
+int vc_ctx_create(uint32_t xid, void __user *data)
+{
+	struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET };
+	struct vx_info *new_vxi;
+	int ret;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if ((xid > MAX_S_CONTEXT) || (xid < 2))
+		return -EINVAL;
+
+	new_vxi = __create_vx_info(xid);
+	if (IS_ERR(new_vxi))
+		return PTR_ERR(new_vxi);
+
+	/* initial flags */
+	new_vxi->vx_flags = vc_data.flagword;
+
+	ret = -ENOEXEC;
+	if (vs_state_change(new_vxi, VSC_STARTUP))
+		goto out;
+
+	ret = vx_migrate_task(current, new_vxi, (!data));
+	if (ret)
+		goto out;
+
+	/* return context id on success */
+	ret = new_vxi->vx_id;
+
+	/* get a reference for persistent contexts */
+	if ((vc_data.flagword & VXF_PERSISTENT))
+		vx_set_persistent(new_vxi);
+out:
+	release_vx_info(new_vxi, NULL);
+	put_vx_info(new_vxi);
+	return ret;
+}
+
+
+int vc_ctx_migrate(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_migrate vc_data = { .flagword = 0 };
+	int ret;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = vx_migrate_task(current, vxi, 0);
+	if (ret)
+		return ret;
+	if (vc_data.flagword & VXM_SET_INIT)
+		ret = vx_set_init(vxi, current);
+	if (ret)
+		return ret;
+	if (vc_data.flagword & VXM_SET_REAPER)
+		ret = vx_set_reaper(vxi, current);
+	return ret;
+}
+
+
+int vc_get_cflags(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_flags_v0 vc_data;
+
+	vc_data.flagword = vxi->vx_flags;
+
+	/* special STATE flag handling */
+	vc_data.mask = vs_mask_flags(~0ULL, vxi->vx_flags, VXF_ONE_TIME);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_cflags(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_flags_v0 vc_data;
+	uint64_t mask, trigger;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	/* special STATE flag handling */
+	mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
+	trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);
+
+	if (vxi == current_vx_info()) {
+		/* if (trigger & VXF_STATE_SETUP)
+			vx_mask_cap_bset(vxi, current); */
+		if (trigger & VXF_STATE_INIT) {
+			int ret;
+
+			ret = vx_set_init(vxi, current);
+			if (ret)
+				return ret;
+			ret = vx_set_reaper(vxi, current);
+			if (ret)
+				return ret;
+		}
+	}
+
+	vxi->vx_flags = vs_mask_flags(vxi->vx_flags,
+		vc_data.flagword, mask);
+	if (trigger & VXF_PERSISTENT)
+		vx_update_persistent(vxi);
+
+	return 0;
+}
+
+
+static inline uint64_t caps_from_cap_t(kernel_cap_t c)
+{
+	uint64_t v = c.cap[0] | ((uint64_t)c.cap[1] << 32);
+
+	// printk("caps_from_cap_t(%08x:%08x) = %016llx\n", c.cap[1], c.cap[0], v);
+	return v;
+}
+
+static inline kernel_cap_t cap_t_from_caps(uint64_t v)
+{
+	kernel_cap_t c = __cap_empty_set;
+
+	c.cap[0] = v & 0xFFFFFFFF;
+	c.cap[1] = (v >> 32) & 0xFFFFFFFF;
+
+	// printk("cap_t_from_caps(%016llx) = %08x:%08x\n", v, c.cap[1], c.cap[0]);
+	return c;
+}
+
+
+static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps)
+{
+	if (bcaps)
+		*bcaps = caps_from_cap_t(vxi->vx_bcaps);
+	if (ccaps)
+		*ccaps = vxi->vx_ccaps;
+
+	return 0;
+}
+
+int vc_get_ccaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_caps_v1 vc_data;
+	int ret;
+
+	ret = do_get_caps(vxi, NULL, &vc_data.ccaps);
+	if (ret)
+		return ret;
+	vc_data.cmask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+static int do_set_caps(struct vx_info *vxi,
+	uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask)
+{
+	uint64_t bcold = caps_from_cap_t(vxi->vx_bcaps);
+
+#if 0
+	printk("do_set_caps(%16llx, %16llx, %16llx, %16llx)\n",
+		bcaps, bmask, ccaps, cmask);
+#endif
+	vxi->vx_bcaps = cap_t_from_caps(
+		vs_mask_flags(bcold, bcaps, bmask));
+	vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask);
+
+	return 0;
+}
+
+int vc_set_ccaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_caps_v1 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask);
+}
+
+int vc_get_bcaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_bcaps vc_data;
+	int ret;
+
+	ret = do_get_caps(vxi, &vc_data.bcaps, NULL);
+	if (ret)
+		return ret;
+	vc_data.bmask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_bcaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_bcaps vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0);
+}
+
+
+int vc_get_umask(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_umask vc_data;
+
+	vc_data.umask = vxi->vx_umask;
+	vc_data.mask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_umask(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_umask vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	vxi->vx_umask = vs_mask_flags(vxi->vx_umask,
+		vc_data.umask, vc_data.mask);
+	return 0;
+}
+
+
+int vc_get_badness(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_badness_v0 vc_data;
+
+	vc_data.bias = vxi->vx_badness_bias;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_badness(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_badness_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	vxi->vx_badness_bias = vc_data.bias;
+	return 0;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(free_vx_info);
+
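
Editor's note, not part of the patch: the context code above packs the two 32-bit words of kernel_cap_t into one 64-bit value (caps_from_cap_t()/cap_t_from_caps()) and updates capability and flag words through a mask, keeping unmasked bits and taking masked bits from the new value (the vs_mask_flags() pattern used by do_set_caps() and vc_set_cflags()). A minimal standalone sketch of that arithmetic in plain userspace C; all names and values below are local to the example, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

struct caps2 { uint32_t cap[2]; };	/* stand-in for the two-word kernel_cap_t */

static uint64_t pack_caps(struct caps2 c)
{
	return (uint64_t)c.cap[0] | ((uint64_t)c.cap[1] << 32);
}

static struct caps2 unpack_caps(uint64_t v)
{
	struct caps2 c = { { (uint32_t)(v & 0xFFFFFFFF), (uint32_t)(v >> 32) } };
	return c;
}

/* keep the bits outside the mask, take the masked bits from the new value */
static uint64_t mask_flags(uint64_t old, uint64_t newval, uint64_t mask)
{
	return (old & ~mask) | (newval & mask);
}

int main(void)
{
	struct caps2 bcaps = { { 0x0000ffff, 0x00000001 } };
	uint64_t packed = pack_caps(bcaps);
	uint64_t updated = mask_flags(packed, 0, 0xff);	/* drop the low 8 capability bits */
	struct caps2 result = unpack_caps(updated);

	printf("packed  = %016llx\n", (unsigned long long)packed);
	printf("updated = %08x:%08x\n", result.cap[1], result.cap[0]);
	return 0;
}
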
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/cvirt.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cvirt.c
--- linux-2.6.32.17/kernel/vserver/cvirt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cvirt.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,304 @@
+/*
+ *  linux/kernel/vserver/cvirt.c
+ *
+ *  Virtual Server: Context Virtualization
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  broken out from limit.c
+ *  V0.02  added utsname stuff
+ *  V0.03  changed vcmds to vxi arg
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/vs_cvirt.h>
+#include <linux/vserver/switch.h>
+#include <linux/vserver/cvirt_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle)
+{
+	struct vx_info *vxi = current_vx_info();
+
+	set_normalized_timespec(uptime,
+		uptime->tv_sec - vxi->cvirt.bias_uptime.tv_sec,
+		uptime->tv_nsec - vxi->cvirt.bias_uptime.tv_nsec);
+	if (!idle)
+		return;
+	set_normalized_timespec(idle,
+		idle->tv_sec - vxi->cvirt.bias_idle.tv_sec,
+		idle->tv_nsec - vxi->cvirt.bias_idle.tv_nsec);
+	return;
+}
+
+uint64_t vx_idle_jiffies(void)
+{
+	return init_task.utime + init_task.stime;
+}
+
+
+
+static inline uint32_t __update_loadavg(uint32_t load,
+	int wsize, int delta, int n)
+{
+	unsigned long long calc, prev;
+
+	/* just set it to n */
+	if (unlikely(delta >= wsize))
+		return (n << FSHIFT);
+
+	calc = delta * n;
+	calc <<= FSHIFT;
+	prev = (wsize - delta);
+	prev *= load;
+	calc += prev;
+	do_div(calc, wsize);
+	return calc;
+}
+
+
+void vx_update_load(struct vx_info *vxi)
+{
+	uint32_t now, last, delta;
+	unsigned int nr_running, nr_uninterruptible;
+	unsigned int total;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vxi->cvirt.load_lock, flags);
+
+	now = jiffies;
+	last = vxi->cvirt.load_last;
+	delta = now - last;
+
+	if (delta < 5*HZ)
+		goto out;
+
+	nr_running = atomic_read(&vxi->cvirt.nr_running);
+	nr_uninterruptible = atomic_read(&vxi->cvirt.nr_uninterruptible);
+	total = nr_running + nr_uninterruptible;
+
+	vxi->cvirt.load[0] = __update_loadavg(vxi->cvirt.load[0],
+		60*HZ, delta, total);
+	vxi->cvirt.load[1] = __update_loadavg(vxi->cvirt.load[1],
+		5*60*HZ, delta, total);
+	vxi->cvirt.load[2] = __update_loadavg(vxi->cvirt.load[2],
+		15*60*HZ, delta, total);
+
+	vxi->cvirt.load_last = now;
+out:
+	atomic_inc(&vxi->cvirt.load_updates);
+	spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags);
+}
+
+
+/*
+ * Commands to do_syslog:
+ *
+ *      0 -- Close the log.  Currently a NOP.
+ *      1 -- Open the log. Currently a NOP.
+ *      2 -- Read from the log.
+ *      3 -- Read all messages remaining in the ring buffer.
+ *      4 -- Read and clear all messages remaining in the ring buffer
+ *      5 -- Clear ring buffer.
+ *      6 -- Disable printk's to console
+ *      7 -- Enable printk's to console
+ *      8 -- Set level of messages printed to console
+ *      9 -- Return number of unread characters in the log buffer
+ *     10 -- Return size of the log buffer
+ */
+int vx_do_syslog(int type, char __user *buf, int len)
+{
+	int error = 0;
+	int do_clear = 0;
+	struct vx_info *vxi = current_vx_info();
+	struct _vx_syslog *log;
+
+	if (!vxi)
+		return -EINVAL;
+	log = &vxi->cvirt.syslog;
+
+	switch (type) {
+	case 0:		/* Close log */
+	case 1:		/* Open log */
+		break;
+	case 2:		/* Read from log */
+		error = wait_event_interruptible(log->log_wait,
+			(log->log_start - log->log_end));
+		if (error)
+			break;
+		spin_lock_irq(&log->logbuf_lock);
+		spin_unlock_irq(&log->logbuf_lock);
+		break;
+	case 4:		/* Read/clear last kernel messages */
+		do_clear = 1;
+		/* fall through */
+	case 3:		/* Read last kernel messages */
+		return 0;
+
+	case 5:		/* Clear ring buffer */
+		return 0;
+
+	case 6:		/* Disable logging to console */
+	case 7:		/* Enable logging to console */
+	case 8:		/* Set level of messages printed to console */
+		break;
+
+	case 9:		/* Number of chars in the log buffer */
+		return 0;
+	case 10:	/* Size of the log buffer */
+		return 0;
+	default:
+		error = -EINVAL;
+		break;
+	}
+	return error;
+}
+
+
+/* virtual host info names */
+
+static char *vx_vhi_name(struct vx_info *vxi, int id)
+{
+	struct nsproxy *nsproxy;
+	struct uts_namespace *uts;
+
+	if (id == VHIN_CONTEXT)
+		return vxi->vx_name;
+
+	nsproxy = vxi->vx_nsproxy[0];
+	if (!nsproxy)
+		return NULL;
+
+	uts = nsproxy->uts_ns;
+	if (!uts)
+		return NULL;
+
+	switch (id) {
+	case VHIN_SYSNAME:
+		return uts->name.sysname;
+	case VHIN_NODENAME:
+		return uts->name.nodename;
+	case VHIN_RELEASE:
+		return uts->name.release;
+	case VHIN_VERSION:
+		return uts->name.version;
+	case VHIN_MACHINE:
+		return uts->name.machine;
+	case VHIN_DOMAINNAME:
+		return uts->name.domainname;
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+int vc_set_vhi_name(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_vhi_name_v0 vc_data;
+	char *name;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	name = vx_vhi_name(vxi, vc_data.field);
+	if (!name)
+		return -EINVAL;
+
+	memcpy(name, vc_data.name, 65);
+	return 0;
+}
+
+int vc_get_vhi_name(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_vhi_name_v0 vc_data;
+	char *name;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	name = vx_vhi_name(vxi, vc_data.field);
+	if (!name)
+		return -EINVAL;
+
+	memcpy(vc_data.name, name, 65);
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+int vc_virt_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_virt_stat_v0 vc_data;
+	struct _vx_cvirt *cvirt = &vxi->cvirt;
+	struct timespec uptime;
+
+	do_posix_clock_monotonic_gettime(&uptime);
+	set_normalized_timespec(&uptime,
+		uptime.tv_sec - cvirt->bias_uptime.tv_sec,
+		uptime.tv_nsec - cvirt->bias_uptime.tv_nsec);
+
+	vc_data.offset = timeval_to_ns(&cvirt->bias_tv);
+	vc_data.uptime = timespec_to_ns(&uptime);
+	vc_data.nr_threads = atomic_read(&cvirt->nr_threads);
+	vc_data.nr_running = atomic_read(&cvirt->nr_running);
+	vc_data.nr_uninterruptible = atomic_read(&cvirt->nr_uninterruptible);
+	vc_data.nr_onhold = atomic_read(&cvirt->nr_onhold);
+	vc_data.nr_forks = atomic_read(&cvirt->total_forks);
+	vc_data.load[0] = cvirt->load[0];
+	vc_data.load[1] = cvirt->load[1];
+	vc_data.load[2] = cvirt->load[2];
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+#ifdef CONFIG_VSERVER_VTIME
+
+/* virtualized time base */
+
+void vx_gettimeofday(struct timeval *tv)
+{
+	struct vx_info *vxi;
+
+	do_gettimeofday(tv);
+	if (!vx_flags(VXF_VIRT_TIME, 0))
+		return;
+
+	vxi = current_vx_info();
+	tv->tv_sec += vxi->cvirt.bias_tv.tv_sec;
+	tv->tv_usec += vxi->cvirt.bias_tv.tv_usec;
+
+	if (tv->tv_usec >= USEC_PER_SEC) {
+		tv->tv_sec++;
+		tv->tv_usec -= USEC_PER_SEC;
+	} else if (tv->tv_usec < 0) {
+		tv->tv_sec--;
+		tv->tv_usec += USEC_PER_SEC;
+	}
+}
+
+int vx_settimeofday(struct timespec *ts)
+{
+	struct timeval tv;
+	struct vx_info *vxi;
+
+	if (!vx_flags(VXF_VIRT_TIME, 0))
+		return do_settimeofday(ts);
+
+	do_gettimeofday(&tv);
+	vxi = current_vx_info();
+	vxi->cvirt.bias_tv.tv_sec = ts->tv_sec - tv.tv_sec;
+	vxi->cvirt.bias_tv.tv_usec =
+		(ts->tv_nsec/NSEC_PER_USEC) - tv.tv_usec;
+	return 0;
+}
+
+#endif
+
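
Editor's note, not part of the patch: vx_gettimeofday()/vx_settimeofday() above virtualize the wall clock by storing a per-context timeval bias and renormalizing tv_usec into [0, 1s) after adding it. A standalone sketch of just that bias arithmetic in userspace C; the type and the sample values are illustrative only:

#include <stdio.h>

#define USEC_PER_SEC 1000000L

struct tval { long sec; long usec; };

/* add the per-context offset and renormalize microseconds into [0, 1s) */
static struct tval apply_bias(struct tval now, struct tval bias)
{
	now.sec += bias.sec;
	now.usec += bias.usec;
	if (now.usec >= USEC_PER_SEC) {
		now.sec++;
		now.usec -= USEC_PER_SEC;
	} else if (now.usec < 0) {
		now.sec--;
		now.usec += USEC_PER_SEC;
	}
	return now;
}

int main(void)
{
	struct tval now = { 1000, 900000 };	/* host wall clock */
	struct tval bias = { 5, 200000 };	/* context set its clock 5.2s ahead */
	struct tval virt = apply_bias(now, bias);

	printf("virtual time: %ld.%06ld\n", virt.sec, virt.usec);	/* 1006.100000 */
	return 0;
}
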
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/cvirt_init.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cvirt_init.h
--- linux-2.6.32.17/kernel/vserver/cvirt_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cvirt_init.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,69 @@
+
+
+extern uint64_t vx_idle_jiffies(void);
+
+static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt)
+{
+	uint64_t idle_jiffies = vx_idle_jiffies();
+	uint64_t nsuptime;
+
+	do_posix_clock_monotonic_gettime(&cvirt->bias_uptime);
+	nsuptime = (unsigned long long)cvirt->bias_uptime.tv_sec
+		* NSEC_PER_SEC + cvirt->bias_uptime.tv_nsec;
+	cvirt->bias_clock = nsec_to_clock_t(nsuptime);
+	cvirt->bias_tv.tv_sec = 0;
+	cvirt->bias_tv.tv_usec = 0;
+
+	jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle);
+	atomic_set(&cvirt->nr_threads, 0);
+	atomic_set(&cvirt->nr_running, 0);
+	atomic_set(&cvirt->nr_uninterruptible, 0);
+	atomic_set(&cvirt->nr_onhold, 0);
+
+	spin_lock_init(&cvirt->load_lock);
+	cvirt->load_last = jiffies;
+	atomic_set(&cvirt->load_updates, 0);
+	cvirt->load[0] = 0;
+	cvirt->load[1] = 0;
+	cvirt->load[2] = 0;
+	atomic_set(&cvirt->total_forks, 0);
+
+	spin_lock_init(&cvirt->syslog.logbuf_lock);
+	init_waitqueue_head(&cvirt->syslog.log_wait);
+	cvirt->syslog.log_start = 0;
+	cvirt->syslog.log_end = 0;
+	cvirt->syslog.con_start = 0;
+	cvirt->syslog.logged_chars = 0;
+}
+
+static inline
+void vx_info_init_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
+{
+	// cvirt_pc->cpustat = { 0 };
+}
+
+static inline void vx_info_exit_cvirt(struct _vx_cvirt *cvirt)
+{
+	int value;
+
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_threads)),
+		"!!! cvirt: %p[nr_threads] = %d on exit.",
+		cvirt, value);
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_running)),
+		"!!! cvirt: %p[nr_running] = %d on exit.",
+		cvirt, value);
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_uninterruptible)),
+		"!!! cvirt: %p[nr_uninterruptible] = %d on exit.",
+		cvirt, value);
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_onhold)),
+		"!!! cvirt: %p[nr_onhold] = %d on exit.",
+		cvirt, value);
+	return;
+}
+
+static inline
+void vx_info_exit_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
+{
+	return;
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/cvirt_proc.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cvirt_proc.h
--- linux-2.6.32.17/kernel/vserver/cvirt_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/cvirt_proc.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,135 @@
+#ifndef _VX_CVIRT_PROC_H
+#define _VX_CVIRT_PROC_H
+
+#include <linux/nsproxy.h>
+#include <linux/mnt_namespace.h>
+#include <linux/ipc_namespace.h>
+#include <linux/utsname.h>
+#include <linux/ipc.h>
+
+
+static inline
+int vx_info_proc_nsproxy(struct nsproxy *nsproxy, char *buffer)
+{
+	struct mnt_namespace *ns;
+	struct uts_namespace *uts;
+	struct ipc_namespace *ipc;
+	struct path path;
+	char *pstr, *root;
+	int length = 0;
+
+	if (!nsproxy)
+		goto out;
+
+	length += sprintf(buffer + length,
+		"NSProxy:\t%p [%p,%p,%p]\n",
+		nsproxy, nsproxy->mnt_ns,
+		nsproxy->uts_ns, nsproxy->ipc_ns);
+
+	ns = nsproxy->mnt_ns;
+	if (!ns)
+		goto skip_ns;
+
+	pstr = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!pstr)
+		goto skip_ns;
+
+	path.mnt = ns->root;
+	path.dentry = ns->root->mnt_root;
+	root = d_path(&path, pstr, PATH_MAX - 2);
+	length += sprintf(buffer + length,
+		"Namespace:\t%p [#%u]\n"
+		"RootPath:\t%s\n",
+		ns, atomic_read(&ns->count),
+		root);
+	kfree(pstr);
+skip_ns:
+
+	uts = nsproxy->uts_ns;
+	if (!uts)
+		goto skip_uts;
+
+	length += sprintf(buffer + length,
+		"SysName:\t%.*s\n"
+		"NodeName:\t%.*s\n"
+		"Release:\t%.*s\n"
+		"Version:\t%.*s\n"
+		"Machine:\t%.*s\n"
+		"DomainName:\t%.*s\n",
+		__NEW_UTS_LEN, uts->name.sysname,
+		__NEW_UTS_LEN, uts->name.nodename,
+		__NEW_UTS_LEN, uts->name.release,
+		__NEW_UTS_LEN, uts->name.version,
+		__NEW_UTS_LEN, uts->name.machine,
+		__NEW_UTS_LEN, uts->name.domainname);
+skip_uts:
+
+	ipc = nsproxy->ipc_ns;
+	if (!ipc)
+		goto skip_ipc;
+
+	length += sprintf(buffer + length,
+		"SEMS:\t\t%d %d %d %d  %d\n"
+		"MSG:\t\t%d %d %d\n"
+		"SHM:\t\t%lu %lu  %d %d\n",
+		ipc->sem_ctls[0], ipc->sem_ctls[1],
+		ipc->sem_ctls[2], ipc->sem_ctls[3],
+		ipc->used_sems,
+		ipc->msg_ctlmax, ipc->msg_ctlmnb, ipc->msg_ctlmni,
+		(unsigned long)ipc->shm_ctlmax,
+		(unsigned long)ipc->shm_ctlall,
+		ipc->shm_ctlmni, ipc->shm_tot);
+skip_ipc:
+out:
+	return length;
+}
+
+
+#include <linux/sched.h>
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
+
+static inline
+int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
+{
+	int length = 0;
+	int a, b, c;
+
+	length += sprintf(buffer + length,
+		"BiasUptime:\t%lu.%02lu\n",
+		(unsigned long)cvirt->bias_uptime.tv_sec,
+		(cvirt->bias_uptime.tv_nsec / (NSEC_PER_SEC / 100)));
+
+	a = cvirt->load[0] + (FIXED_1 / 200);
+	b = cvirt->load[1] + (FIXED_1 / 200);
+	c = cvirt->load[2] + (FIXED_1 / 200);
+	length += sprintf(buffer + length,
+		"nr_threads:\t%d\n"
+		"nr_running:\t%d\n"
+		"nr_unintr:\t%d\n"
+		"nr_onhold:\t%d\n"
+		"load_updates:\t%d\n"
+		"loadavg:\t%d.%02d %d.%02d %d.%02d\n"
+		"total_forks:\t%d\n",
+		atomic_read(&cvirt->nr_threads),
+		atomic_read(&cvirt->nr_running),
+		atomic_read(&cvirt->nr_uninterruptible),
+		atomic_read(&cvirt->nr_onhold),
+		atomic_read(&cvirt->load_updates),
+		LOAD_INT(a), LOAD_FRAC(a),
+		LOAD_INT(b), LOAD_FRAC(b),
+		LOAD_INT(c), LOAD_FRAC(c),
+		atomic_read(&cvirt->total_forks));
+	return length;
+}
+
+static inline
+int vx_info_proc_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc,
+	char *buffer, int cpu)
+{
+	int length = 0;
+	return length;
+}
+
+#endif	/* _VX_CVIRT_PROC_H */
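
Editor's note, not part of the patch: the per-context load average is kept in FSHIFT fixed point. __update_loadavg() in cvirt.c blends the old value toward the current task count over the sampling window, and the LOAD_INT()/LOAD_FRAC() macros above split the result for display. A standalone sketch of that arithmetic in userspace C; FSHIFT/FIXED_1 follow the usual kernel values, everything else (tick rate, sample values) is local to the example:

#include <stdio.h>
#include <stdint.h>

#define FSHIFT	11
#define FIXED_1	(1 << FSHIFT)
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static uint32_t update_loadavg(uint32_t load, int wsize, int delta, int n)
{
	uint64_t calc;

	if (delta >= wsize)			/* window fully elapsed: jump to n */
		return (uint32_t)n << FSHIFT;

	calc = (uint64_t)delta * n << FSHIFT;	  /* new sample, weighted by delta */
	calc += (uint64_t)(wsize - delta) * load; /* old value, weighted by the rest */
	return (uint32_t)(calc / wsize);
}

int main(void)
{
	int hz = 100;				/* assumed tick rate for the example */
	uint32_t load = 2 << FSHIFT;		/* previous 1-minute value: 2.00 */
	uint32_t now = update_loadavg(load, 60 * hz, 5 * hz, 4); /* 5s later, 4 tasks */
	uint32_t rounded = now + FIXED_1 / 200;	/* same rounding as the proc output */

	printf("loadavg: %u.%02u\n", LOAD_INT(rounded), LOAD_FRAC(rounded));	/* 2.17 */
	return 0;
}
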
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/debug.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/debug.c
--- linux-2.6.32.17/kernel/vserver/debug.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/debug.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,32 @@
+/*
+ *  kernel/vserver/debug.c
+ *
+ *  Copyright (C) 2005-2007 Herbert Pötzl
+ *
+ *  V0.01  vx_info dump support
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/vserver/context.h>
+
+
+void	dump_vx_info(struct vx_info *vxi, int level)
+{
+	printk("vx_info %p[#%d, %d.%d, %4x]\n", vxi, vxi->vx_id,
+		atomic_read(&vxi->vx_usecnt),
+		atomic_read(&vxi->vx_tasks),
+		vxi->vx_state);
+	if (level > 0) {
+		__dump_vx_limit(&vxi->limit);
+		__dump_vx_sched(&vxi->sched);
+		__dump_vx_cvirt(&vxi->cvirt);
+		__dump_vx_cacct(&vxi->cacct);
+	}
+	printk("---\n");
+}
+
+
+EXPORT_SYMBOL_GPL(dump_vx_info);
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/device.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/device.c
--- linux-2.6.32.17/kernel/vserver/device.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/device.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,443 @@
+/*
+ *  linux/kernel/vserver/device.c
+ *
+ *  Linux-VServer: Device Support
+ *
+ *  Copyright (C) 2006  Herbert Pötzl
+ *  Copyright (C) 2007  Daniel Hokka Zakrisson
+ *
+ *  V0.01  device mapping basics
+ *  V0.02  added defaults
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/hash.h>
+
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+#include <linux/vserver/base.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/context.h>
+#include <linux/vserver/device.h>
+#include <linux/vserver/device_cmd.h>
+
+
+#define DMAP_HASH_BITS	4
+
+
+struct vs_mapping {
+	union {
+		struct hlist_node hlist;
+		struct list_head list;
+	} u;
+#define dm_hlist	u.hlist
+#define dm_list		u.list
+	xid_t xid;
+	dev_t device;
+	struct vx_dmap_target target;
+};
+
+
+static struct hlist_head dmap_main_hash[1 << DMAP_HASH_BITS];
+
+static spinlock_t dmap_main_hash_lock = SPIN_LOCK_UNLOCKED;
+
+static struct vx_dmap_target dmap_defaults[2] = {
+	{ .flags = DATTR_OPEN },
+	{ .flags = DATTR_OPEN },
+};
+
+
+struct kmem_cache *dmap_cachep __read_mostly;
+
+int __init dmap_cache_init(void)
+{
+	dmap_cachep = kmem_cache_create("dmap_cache",
+		sizeof(struct vs_mapping), 0,
+		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	return 0;
+}
+
+__initcall(dmap_cache_init);
+
+
+static inline unsigned int __hashval(dev_t dev, int bits)
+{
+	return hash_long((unsigned long)dev, bits);
+}
+
+
+/*	__hash_mapping()
+ *	add the mapping to the hash table
+ */
+static inline void __hash_mapping(struct vx_info *vxi, struct vs_mapping *vdm)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	struct hlist_head *head, *hash = dmap_main_hash;
+	int device = vdm->device;
+
+	spin_lock(hash_lock);
+	vxdprintk(VXD_CBIT(misc, 8), "__hash_mapping: %p[#%d] %08x:%08x",
+		vxi, vxi ? vxi->vx_id : 0, device, vdm->target.target);
+
+	head = &hash[__hashval(device, DMAP_HASH_BITS)];
+	hlist_add_head(&vdm->dm_hlist, head);
+	spin_unlock(hash_lock);
+}
+
+
+static inline int __mode_to_default(umode_t mode)
+{
+	switch (mode) {
+	case S_IFBLK:
+		return 0;
+	case S_IFCHR:
+		return 1;
+	default:
+		BUG();
+	}
+}
+
+
+/*	__set_default()
+ *	set a default
+ */
+static inline void __set_default(struct vx_info *vxi, umode_t mode,
+	struct vx_dmap_target *vdmt)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	spin_lock(hash_lock);
+
+	if (vxi)
+		vxi->dmap.targets[__mode_to_default(mode)] = *vdmt;
+	else
+		dmap_defaults[__mode_to_default(mode)] = *vdmt;
+
+
+	spin_unlock(hash_lock);
+
+	vxdprintk(VXD_CBIT(misc, 8), "__set_default: %p[#%u] %08x %04x",
+		  vxi, vxi ? vxi->vx_id : 0, vdmt->target, vdmt->flags);
+}
+
+
+/*	__remove_default()
+ *	remove a default
+ */
+static inline int __remove_default(struct vx_info *vxi, umode_t mode)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	spin_lock(hash_lock);
+
+	if (vxi)
+		vxi->dmap.targets[__mode_to_default(mode)].flags = 0;
+	else	/* remove == reset */
+		dmap_defaults[__mode_to_default(mode)].flags = DATTR_OPEN | mode;
+
+	spin_unlock(hash_lock);
+	return 0;
+}
+
+
+/*	__find_mapping()
+ *	find a mapping in the hash table
+ *
+ *	caller must hold hash_lock
+ */
+static inline int __find_mapping(xid_t xid, dev_t device, umode_t mode,
+	struct vs_mapping **local, struct vs_mapping **global)
+{
+	struct hlist_head *hash = dmap_main_hash;
+	struct hlist_head *head = &hash[__hashval(device, DMAP_HASH_BITS)];
+	struct hlist_node *pos;
+	struct vs_mapping *vdm;
+
+	*local = NULL;
+	if (global)
+		*global = NULL;
+
+	hlist_for_each(pos, head) {
+		vdm = hlist_entry(pos, struct vs_mapping, dm_hlist);
+
+		if ((vdm->device == device) &&
+			!((vdm->target.flags ^ mode) & S_IFMT)) {
+			if (vdm->xid == xid) {
+				*local = vdm;
+				return 1;
+			} else if (global && vdm->xid == 0)
+				*global = vdm;
+		}
+	}
+
+	if (global && *global)
+		return 0;
+	else
+		return -ENOENT;
+}
+
+
+/*	__lookup_mapping()
+ *	find a mapping and store the result in target and flags
+ */
+static inline int __lookup_mapping(struct vx_info *vxi,
+	dev_t device, dev_t *target, int *flags, umode_t mode)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	struct vs_mapping *vdm, *global;
+	struct vx_dmap_target *vdmt;
+	int ret = 0;
+	xid_t xid = vxi->vx_id;
+	int index;
+
+	spin_lock(hash_lock);
+	if (__find_mapping(xid, device, mode, &vdm, &global) > 0) {
+		ret = 1;
+		vdmt = &vdm->target;
+		goto found;
+	}
+
+	index = __mode_to_default(mode);
+	if (vxi && vxi->dmap.targets[index].flags) {
+		ret = 2;
+		vdmt = &vxi->dmap.targets[index];
+	} else if (global) {
+		ret = 3;
+		vdmt = &global->target;
+		goto found;
+	} else {
+		ret = 4;
+		vdmt = &dmap_defaults[index];
+	}
+
+found:
+	if (target && (vdmt->flags & DATTR_REMAP))
+		*target = vdmt->target;
+	else if (target)
+		*target = device;
+	if (flags)
+		*flags = vdmt->flags;
+
+	spin_unlock(hash_lock);
+
+	return ret;
+}
+
+
+/*	__remove_mapping()
+ *	remove a mapping from the hash table
+ */
+static inline int __remove_mapping(struct vx_info *vxi, dev_t device,
+	umode_t mode)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	struct vs_mapping *vdm = NULL;
+	int ret = 0;
+
+	spin_lock(hash_lock);
+
+	ret = __find_mapping((vxi ? vxi->vx_id : 0), device, mode, &vdm,
+		NULL);
+	vxdprintk(VXD_CBIT(misc, 8), "__remove_mapping: %p[#%d] %08x %04x",
+		vxi, vxi ? vxi->vx_id : 0, device, mode);
+	if (ret < 0)
+		goto out;
+	hlist_del(&vdm->dm_hlist);
+
+out:
+	spin_unlock(hash_lock);
+	if (vdm)
+		kmem_cache_free(dmap_cachep, vdm);
+	return ret;
+}
+
+
+
+int vs_map_device(struct vx_info *vxi,
+	dev_t device, dev_t *target, umode_t mode)
+{
+	int ret, flags = DATTR_MASK;
+
+	if (!vxi) {
+		if (target)
+			*target = device;
+		goto out;
+	}
+	ret = __lookup_mapping(vxi, device, target, &flags, mode);
+	vxdprintk(VXD_CBIT(misc, 8), "vs_map_device: %08x target: %08x flags: %04x mode: %04x mapped=%d",
+		device, target ? *target : 0, flags, mode, ret);
+out:
+	return (flags & DATTR_MASK);
+}
+
+
+
+static int do_set_mapping(struct vx_info *vxi,
+	dev_t device, dev_t target, int flags, umode_t mode)
+{
+	if (device) {
+		struct vs_mapping *new;
+
+		new = kmem_cache_alloc(dmap_cachep, GFP_KERNEL);
+		if (!new)
+			return -ENOMEM;
+
+		INIT_HLIST_NODE(&new->dm_hlist);
+		new->device = device;
+		new->target.target = target;
+		new->target.flags = flags | mode;
+		new->xid = (vxi ? vxi->vx_id : 0);
+
+		vxdprintk(VXD_CBIT(misc, 8), "do_set_mapping: %08x target: %08x flags: %04x", device, target, flags);
+		__hash_mapping(vxi, new);
+	} else {
+		struct vx_dmap_target new = {
+			.target = target,
+			.flags = flags | mode,
+		};
+		__set_default(vxi, mode, &new);
+	}
+	return 0;
+}
+
+
+static int do_unset_mapping(struct vx_info *vxi,
+	dev_t device, dev_t target, int flags, umode_t mode)
+{
+	int ret = -EINVAL;
+
+	if (device) {
+		ret = __remove_mapping(vxi, device, mode);
+		if (ret < 0)
+			goto out;
+	} else {
+		ret = __remove_default(vxi, mode);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	return ret;
+}
+
+
+static inline int __user_device(const char __user *name, dev_t *dev,
+	umode_t *mode)
+{
+	struct nameidata nd;
+	int ret;
+
+	if (!name) {
+		*dev = 0;
+		return 0;
+	}
+	ret = user_lpath(name, &nd.path);
+	if (ret)
+		return ret;
+	if (nd.path.dentry->d_inode) {
+		*dev = nd.path.dentry->d_inode->i_rdev;
+		*mode = nd.path.dentry->d_inode->i_mode;
+	}
+	path_put(&nd.path);
+	return 0;
+}
+
+static inline int __mapping_mode(dev_t device, dev_t target,
+	umode_t device_mode, umode_t target_mode, umode_t *mode)
+{
+	if (device)
+		*mode = device_mode & S_IFMT;
+	else if (target)
+		*mode = target_mode & S_IFMT;
+	else
+		return -EINVAL;
+
+	/* if both given, device and target mode have to match */
+	if (device && target &&
+		((device_mode ^ target_mode) & S_IFMT))
+		return -EINVAL;
+	return 0;
+}
+
+
+static inline int do_mapping(struct vx_info *vxi, const char __user *device_path,
+	const char __user *target_path, int flags, int set)
+{
+	dev_t device = ~0, target = ~0;
+	umode_t device_mode = 0, target_mode = 0, mode;
+	int ret;
+
+	ret = __user_device(device_path, &device, &device_mode);
+	if (ret)
+		return ret;
+	ret = __user_device(target_path, &target, &target_mode);
+	if (ret)
+		return ret;
+
+	ret = __mapping_mode(device, target,
+		device_mode, target_mode, &mode);
+	if (ret)
+		return ret;
+
+	if (set)
+		return do_set_mapping(vxi, device, target,
+			flags, mode);
+	else
+		return do_unset_mapping(vxi, device, target,
+			flags, mode);
+}
+
+
+int vc_set_mapping(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, vc_data.device, vc_data.target,
+		vc_data.flags, 1);
+}
+
+int vc_unset_mapping(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, vc_data.device, vc_data.target,
+		vc_data.flags, 0);
+}
+
+
+#ifdef	CONFIG_COMPAT
+
+int vc_set_mapping_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
+		compat_ptr(vc_data.target_ptr), vc_data.flags, 1);
+}
+
+int vc_unset_mapping_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
+		compat_ptr(vc_data.target_ptr), vc_data.flags, 0);
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
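
Editor's note, not part of the patch: __lookup_mapping() above resolves a device through a fixed precedence: an explicit per-context mapping wins, then the per-context default, then an explicit global (xid 0) mapping, then the global default. A standalone sketch of that precedence chain with simplified stand-in structures; the names and sample target are local to the example:

#include <stdio.h>

struct dmap_target { unsigned int target; int flags; };

/* pick the effective mapping; returns the same 1..4 precedence codes as above */
static int pick_mapping(const struct dmap_target *ctx_explicit,
			const struct dmap_target *ctx_default,
			const struct dmap_target *global_explicit,
			const struct dmap_target *global_default,
			const struct dmap_target **out)
{
	if (ctx_explicit)	{ *out = ctx_explicit;		return 1; }
	if (ctx_default->flags)	{ *out = ctx_default;		return 2; }
	if (global_explicit)	{ *out = global_explicit;	return 3; }
	*out = global_default;
	return 4;
}

int main(void)
{
	struct dmap_target ctx_def = { 0, 0 };		/* per-context default unset */
	struct dmap_target glob_map = { 0x0805, 1 };	/* explicit xid 0 mapping exists */
	struct dmap_target glob_def = { 0, 1 };
	const struct dmap_target *hit;
	int ret = pick_mapping(NULL, &ctx_def, &glob_map, &glob_def, &hit);

	printf("precedence class %d, target %04x\n", ret, hit->target);	/* 3, 0805 */
	return 0;
}
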
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/dlimit.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/dlimit.c
--- linux-2.6.32.17/kernel/vserver/dlimit.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/dlimit.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,529 @@
+/*
+ *  linux/kernel/vserver/dlimit.c
+ *
+ *  Virtual Server: Context Disk Limits
+ *
+ *  Copyright (C) 2004-2009  Herbert Pötzl
+ *
+ *  V0.01  initial version
+ *  V0.02  compat32 splitup
+ *  V0.03  extended interface
+ *
+ */
+
+#include <linux/statfs.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vserver/dlimit_cmd.h>
+
+#include <asm/uaccess.h>
+
+/*	__alloc_dl_info()
+
+	* allocate an initialized dl_info struct
+	* doesn't make it visible (hash)			*/
+
+static struct dl_info *__alloc_dl_info(struct super_block *sb, tag_t tag)
+{
+	struct dl_info *new = NULL;
+
+	vxdprintk(VXD_CBIT(dlim, 5),
+		"alloc_dl_info(%p,%d)*", sb, tag);
+
+	/* would this benefit from a slab cache? */
+	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
+	if (!new)
+		return 0;
+
+	memset(new, 0, sizeof(struct dl_info));
+	new->dl_tag = tag;
+	new->dl_sb = sb;
+	INIT_RCU_HEAD(&new->dl_rcu);
+	INIT_HLIST_NODE(&new->dl_hlist);
+	spin_lock_init(&new->dl_lock);
+	atomic_set(&new->dl_refcnt, 0);
+	atomic_set(&new->dl_usecnt, 0);
+
+	/* rest of init goes here */
+
+	vxdprintk(VXD_CBIT(dlim, 4),
+		"alloc_dl_info(%p,%d) = %p", sb, tag, new);
+	return new;
+}
+
+/*	__dealloc_dl_info()
+
+	* final disposal of dl_info				*/
+
+static void __dealloc_dl_info(struct dl_info *dli)
+{
+	vxdprintk(VXD_CBIT(dlim, 4),
+		"dealloc_dl_info(%p)", dli);
+
+	dli->dl_hlist.next = LIST_POISON1;
+	dli->dl_tag = -1;
+	dli->dl_sb = 0;
+
+	BUG_ON(atomic_read(&dli->dl_usecnt));
+	BUG_ON(atomic_read(&dli->dl_refcnt));
+
+	kfree(dli);
+}
+
+
+/*	hash table for dl_info hash */
+
+#define DL_HASH_SIZE	13
+
+struct hlist_head dl_info_hash[DL_HASH_SIZE];
+
+static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;
+
+
+static inline unsigned int __hashval(struct super_block *sb, tag_t tag)
+{
+	return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE);
+}
+
+
+
+/*	__hash_dl_info()
+
+	* add the dli to the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __hash_dl_info(struct dl_info *dli)
+{
+	struct hlist_head *head;
+
+	vxdprintk(VXD_CBIT(dlim, 6),
+		"__hash_dl_info: %p[#%d]", dli, dli->dl_tag);
+	get_dl_info(dli);
+	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)];
+	hlist_add_head_rcu(&dli->dl_hlist, head);
+}
+
+/*	__unhash_dl_info()
+
+	* remove the dli from the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __unhash_dl_info(struct dl_info *dli)
+{
+	vxdprintk(VXD_CBIT(dlim, 6),
+		"__unhash_dl_info: %p[#%d]", dli, dli->dl_tag);
+	hlist_del_rcu(&dli->dl_hlist);
+	put_dl_info(dli);
+}
+
+
+/*	__lookup_dl_info()
+
+	* requires the rcu_read_lock()
+	* doesn't increment the dl_refcnt			*/
+
+static inline struct dl_info *__lookup_dl_info(struct super_block *sb, tag_t tag)
+{
+	struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)];
+	struct hlist_node *pos;
+	struct dl_info *dli;
+
+	hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
+
+		if (dli->dl_tag == tag && dli->dl_sb == sb) {
+			return dli;
+		}
+	}
+	return NULL;
+}
+
+
+struct dl_info *locate_dl_info(struct super_block *sb, tag_t tag)
+{
+	struct dl_info *dli;
+
+	rcu_read_lock();
+	dli = get_dl_info(__lookup_dl_info(sb, tag));
+	vxdprintk(VXD_CBIT(dlim, 7),
+		"locate_dl_info(%p,#%d) = %p", sb, tag, dli);
+	rcu_read_unlock();
+	return dli;
+}
+
+void rcu_free_dl_info(struct rcu_head *head)
+{
+	struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
+	int usecnt, refcnt;
+
+	BUG_ON(!dli || !head);
+
+	usecnt = atomic_read(&dli->dl_usecnt);
+	BUG_ON(usecnt < 0);
+
+	refcnt = atomic_read(&dli->dl_refcnt);
+	BUG_ON(refcnt < 0);
+
+	vxdprintk(VXD_CBIT(dlim, 3),
+		"rcu_free_dl_info(%p)", dli);
+	if (!usecnt)
+		__dealloc_dl_info(dli);
+	else
+		printk("!!! rcu didn't free\n");
+}
+
+
+
+
+static int do_addrem_dlimit(uint32_t id, const char __user *name,
+	uint32_t flags, int add)
+{
+	struct path path;
+	int ret;
+
+	ret = user_lpath(name, &path);
+	if (!ret) {
+		struct super_block *sb;
+		struct dl_info *dli;
+
+		ret = -EINVAL;
+		if (!path.dentry->d_inode)
+			goto out_release;
+		if (!(sb = path.dentry->d_inode->i_sb))
+			goto out_release;
+
+		if (add) {
+			dli = __alloc_dl_info(sb, id);
+			spin_lock(&dl_info_hash_lock);
+
+			ret = -EEXIST;
+			if (__lookup_dl_info(sb, id))
+				goto out_unlock;
+			__hash_dl_info(dli);
+			dli = NULL;
+		} else {
+			spin_lock(&dl_info_hash_lock);
+			dli = __lookup_dl_info(sb, id);
+
+			ret = -ESRCH;
+			if (!dli)
+				goto out_unlock;
+			__unhash_dl_info(dli);
+		}
+		ret = 0;
+	out_unlock:
+		spin_unlock(&dl_info_hash_lock);
+		if (add && dli)
+			__dealloc_dl_info(dli);
+	out_release:
+		path_put(&path);
+	}
+	return ret;
+}
+
+int vc_add_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
+}
+
+int vc_rem_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_add_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id,
+		compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
+}
+
+int vc_rem_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id,
+		compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+static inline
+int do_set_dlimit(uint32_t id, const char __user *name,
+	uint32_t space_used, uint32_t space_total,
+	uint32_t inodes_used, uint32_t inodes_total,
+	uint32_t reserved, uint32_t flags)
+{
+	struct path path;
+	int ret;
+
+	ret = user_lpath(name, &path);
+	if (!ret) {
+		struct super_block *sb;
+		struct dl_info *dli;
+
+		ret = -EINVAL;
+		if (!path.dentry->d_inode)
+			goto out_release;
+		if (!(sb = path.dentry->d_inode->i_sb))
+			goto out_release;
+
+		/* sanity checks */
+		if ((reserved != CDLIM_KEEP &&
+			reserved > 100) ||
+			(inodes_used != CDLIM_KEEP &&
+			inodes_used > inodes_total) ||
+			(space_used != CDLIM_KEEP &&
+			space_used > space_total))
+			goto out_release;
+
+		ret = -ESRCH;
+		dli = locate_dl_info(sb, id);
+		if (!dli)
+			goto out_release;
+
+		spin_lock(&dli->dl_lock);
+
+		if (inodes_used != CDLIM_KEEP)
+			dli->dl_inodes_used = inodes_used;
+		if (inodes_total != CDLIM_KEEP)
+			dli->dl_inodes_total = inodes_total;
+		if (space_used != CDLIM_KEEP)
+			dli->dl_space_used = dlimit_space_32to64(
+				space_used, flags, DLIMS_USED);
+
+		if (space_total == CDLIM_INFINITY)
+			dli->dl_space_total = DLIM_INFINITY;
+		else if (space_total != CDLIM_KEEP)
+			dli->dl_space_total = dlimit_space_32to64(
+				space_total, flags, DLIMS_TOTAL);
+
+		if (reserved != CDLIM_KEEP)
+			dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
+
+		spin_unlock(&dli->dl_lock);
+
+		put_dl_info(dli);
+		ret = 0;
+
+	out_release:
+		path_put(&path);
+	}
+	return ret;
+}
+
+int vc_set_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_dlimit(id, vc_data.name,
+		vc_data.space_used, vc_data.space_total,
+		vc_data.inodes_used, vc_data.inodes_total,
+		vc_data.reserved, vc_data.flags);
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_set_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
+		vc_data.space_used, vc_data.space_total,
+		vc_data.inodes_used, vc_data.inodes_total,
+		vc_data.reserved, vc_data.flags);
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+static inline
+int do_get_dlimit(uint32_t id, const char __user *name,
+	uint32_t *space_used, uint32_t *space_total,
+	uint32_t *inodes_used, uint32_t *inodes_total,
+	uint32_t *reserved, uint32_t *flags)
+{
+	struct path path;
+	int ret;
+
+	ret = user_lpath(name, &path);
+	if (!ret) {
+		struct super_block *sb;
+		struct dl_info *dli;
+
+		ret = -EINVAL;
+		if (!path.dentry->d_inode)
+			goto out_release;
+		if (!(sb = path.dentry->d_inode->i_sb))
+			goto out_release;
+
+		ret = -ESRCH;
+		dli = locate_dl_info(sb, id);
+		if (!dli)
+			goto out_release;
+
+		spin_lock(&dli->dl_lock);
+		*inodes_used = dli->dl_inodes_used;
+		*inodes_total = dli->dl_inodes_total;
+
+		*space_used = dlimit_space_64to32(
+			dli->dl_space_used, flags, DLIMS_USED);
+
+		if (dli->dl_space_total == DLIM_INFINITY)
+			*space_total = CDLIM_INFINITY;
+		else
+			*space_total = dlimit_space_64to32(
+				dli->dl_space_total, flags, DLIMS_TOTAL);
+
+		*reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
+		spin_unlock(&dli->dl_lock);
+
+		put_dl_info(dli);
+		ret = -EFAULT;
+
+		ret = 0;
+	out_release:
+		path_put(&path);
+	}
+	return ret;
+}
+
+
+int vc_get_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_dlimit(id, vc_data.name,
+		&vc_data.space_used, &vc_data.space_total,
+		&vc_data.inodes_used, &vc_data.inodes_total,
+		&vc_data.reserved, &vc_data.flags);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_get_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0_x32 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
+		&vc_data.space_used, &vc_data.space_total,
+		&vc_data.inodes_used, &vc_data.inodes_total,
+		&vc_data.reserved, &vc_data.flags);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct dl_info *dli;
+	__u64 blimit, bfree, bavail;
+	__u32 ifree;
+
+	dli = locate_dl_info(sb, dx_current_tag());
+	if (!dli)
+		return;
+
+	spin_lock(&dli->dl_lock);
+	if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY)
+		goto no_ilim;
+
+	/* reduce max inodes available to limit */
+	if (buf->f_files > dli->dl_inodes_total)
+		buf->f_files = dli->dl_inodes_total;
+
+	ifree = dli->dl_inodes_total - dli->dl_inodes_used;
+	/* reduce free inodes to min */
+	if (ifree < buf->f_ffree)
+		buf->f_ffree = ifree;
+
+no_ilim:
+	if (dli->dl_space_total == DLIM_INFINITY)
+		goto no_blim;
+
+	blimit = dli->dl_space_total >> sb->s_blocksize_bits;
+
+	if (dli->dl_space_total < dli->dl_space_used)
+		bfree = 0;
+	else
+		bfree = (dli->dl_space_total - dli->dl_space_used)
+			>> sb->s_blocksize_bits;
+
+	bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
+	if (bavail < dli->dl_space_used)
+		bavail = 0;
+	else
+		bavail = (bavail - dli->dl_space_used)
+			>> sb->s_blocksize_bits;
+
+	/* reduce max space available to limit */
+	if (buf->f_blocks > blimit)
+		buf->f_blocks = blimit;
+
+	/* reduce free space to min */
+	if (bfree < buf->f_bfree)
+		buf->f_bfree = bfree;
+
+	/* reduce avail space to min */
+	if (bavail < buf->f_bavail)
+		buf->f_bavail = bavail;
+
+no_blim:
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+
+	return;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(locate_dl_info);
+EXPORT_SYMBOL_GPL(rcu_free_dl_info);
+
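
Editor's note, not part of the patch: the disk-limit code above stores the reserved percentage as the 10-bit fixed-point multiplier dl_nrlmult and uses it in vx_vsi_statfs() to clamp the context's view of total, free and available blocks. A standalone worked example of that arithmetic in userspace C; the limits, usage and block size are example values only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int reserved = 5;			/* reserve 5% for root */
	uint64_t space_total = 10ULL << 30;		/* 10 GiB limit */
	uint64_t space_used  =  4ULL << 30;		/* 4 GiB in use */
	unsigned int blocksize_bits = 12;		/* 4 KiB blocks */

	/* dl_nrlmult: (1 << 10) * (100 - reserved) / 100 */
	unsigned int nrlmult = (1 << 10) * (100 - reserved) / 100;

	uint64_t f_blocks = space_total >> blocksize_bits;
	uint64_t f_bfree  = (space_total - space_used) >> blocksize_bits;

	/* available space honours the reservation before subtracting usage */
	uint64_t bavail = (space_total >> 10) * nrlmult;
	uint64_t f_bavail = (bavail < space_used) ?
		0 : (bavail - space_used) >> blocksize_bits;

	/* prints: nrlmult=972 blocks=2621440 bfree=1572864 bavail=1439744 */
	printf("nrlmult=%u blocks=%llu bfree=%llu bavail=%llu\n",
		nrlmult,
		(unsigned long long)f_blocks,
		(unsigned long long)f_bfree,
		(unsigned long long)f_bavail);
	return 0;
}
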
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/helper.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/helper.c
--- linux-2.6.32.17/kernel/vserver/helper.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/helper.c	2009-12-03 22:20:22.000000000 +0100
@@ -0,0 +1,223 @@
+/*
+ *  linux/kernel/vserver/helper.c
+ *
+ *  Virtual Context Support
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic helper
+ *
+ */
+
+#include <linux/kmod.h>
+#include <linux/reboot.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vserver/signal.h>
+
+
+char vshelper_path[255] = "/sbin/vshelper";
+
+
+static int do_vshelper(char *name, char *argv[], char *envp[], int sync)
+{
+	int ret;
+
+	if ((ret = call_usermodehelper(name, argv, envp, sync))) {
+		printk(	KERN_WARNING
+			"%s: (%s %s) returned %s with %d\n",
+			name, argv[1], argv[2],
+			sync ? "sync" : "async", ret);
+	}
+	vxdprintk(VXD_CBIT(switch, 4),
+		"%s: (%s %s) returned %s with %d",
+		name, argv[1], argv[2], sync ? "sync" : "async", ret);
+	return ret;
+}
+
+/*
+ *      vshelper path is set via /proc/sys
+ *      invoked by vserver sys_reboot(), with
+ *      the following arguments
+ *
+ *      argv [0] = vshelper_path;
+ *      argv [1] = action: "restart", "halt", "poweroff", ...
+ *      argv [2] = context identifier
+ *
+ *      envp [*] = type-specific parameters
+ */
+
+long vs_reboot_helper(struct vx_info *vxi, int cmd, void __user *arg)
+{
+	char id_buf[8], cmd_buf[16];
+	char uid_buf[16], pid_buf[16];
+	int ret;
+
+	char *argv[] = {vshelper_path, NULL, id_buf, 0};
+	char *envp[] = {"HOME=/", "TERM=linux",
+			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+			uid_buf, pid_buf, cmd_buf, 0};
+
+	if (vx_info_state(vxi, VXS_HELPER))
+		return -EAGAIN;
+	vxi->vx_state |= VXS_HELPER;
+
+	snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id);
+
+	snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
+	snprintf(uid_buf, sizeof(uid_buf)-1, "VS_UID=%d", current_uid());
+	snprintf(pid_buf, sizeof(pid_buf)-1, "VS_PID=%d", current->pid);
+
+	switch (cmd) {
+	case LINUX_REBOOT_CMD_RESTART:
+		argv[1] = "restart";
+		break;
+
+	case LINUX_REBOOT_CMD_HALT:
+		argv[1] = "halt";
+		break;
+
+	case LINUX_REBOOT_CMD_POWER_OFF:
+		argv[1] = "poweroff";
+		break;
+
+	case LINUX_REBOOT_CMD_SW_SUSPEND:
+		argv[1] = "swsusp";
+		break;
+
+	case LINUX_REBOOT_CMD_OOM:
+		argv[1] = "oom";
+		break;
+
+	default:
+		vxi->vx_state &= ~VXS_HELPER;
+		return 0;
+	}
+
+	ret = do_vshelper(vshelper_path, argv, envp, 0);
+	vxi->vx_state &= ~VXS_HELPER;
+	__wakeup_vx_info(vxi);
+	return (ret) ? -EPERM : 0;
+}
+
+
+long vs_reboot(unsigned int cmd, void __user *arg)
+{
+	struct vx_info *vxi = current_vx_info();
+	long ret = 0;
+
+	vxdprintk(VXD_CBIT(misc, 5),
+		"vs_reboot(%p[#%d],%u)",
+		vxi, vxi ? vxi->vx_id : 0, cmd);
+
+	ret = vs_reboot_helper(vxi, cmd, arg);
+	if (ret)
+		return ret;
+
+	vxi->reboot_cmd = cmd;
+	if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
+		switch (cmd) {
+		case LINUX_REBOOT_CMD_RESTART:
+		case LINUX_REBOOT_CMD_HALT:
+		case LINUX_REBOOT_CMD_POWER_OFF:
+			vx_info_kill(vxi, 0, SIGKILL);
+			vx_info_kill(vxi, 1, SIGKILL);
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+long vs_oom_action(unsigned int cmd)
+{
+	struct vx_info *vxi = current_vx_info();
+	long ret = 0;
+
+	vxdprintk(VXD_CBIT(misc, 5),
+		"vs_oom_action(%p[#%d],%u)",
+		vxi, vxi ? vxi->vx_id : 0, cmd);
+
+	ret = vs_reboot_helper(vxi, cmd, NULL);
+	if (ret)
+		return ret;
+
+	vxi->reboot_cmd = cmd;
+	if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
+		vx_info_kill(vxi, 0, SIGKILL);
+		vx_info_kill(vxi, 1, SIGKILL);
+	}
+	return 0;
+}
+
+/*
+ *      argv [0] = vshelper_path;
+ *      argv [1] = action: "startup", "shutdown"
+ *      argv [2] = context identifier
+ *
+ *      envp [*] = type-specific parameters
+ */
+
+long vs_state_change(struct vx_info *vxi, unsigned int cmd)
+{
+	char id_buf[8], cmd_buf[16];
+	char *argv[] = {vshelper_path, NULL, id_buf, 0};
+	char *envp[] = {"HOME=/", "TERM=linux",
+			"PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+
+	if (!vx_info_flags(vxi, VXF_SC_HELPER, 0))
+		return 0;
+
+	snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id);
+	snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
+
+	switch (cmd) {
+	case VSC_STARTUP:
+		argv[1] = "startup";
+		break;
+	case VSC_SHUTDOWN:
+		argv[1] = "shutdown";
+		break;
+	default:
+		return 0;
+	}
+
+	return do_vshelper(vshelper_path, argv, envp, 1);
+}
+
+
+/*
+ *      argv [0] = vshelper_path;
+ *      argv [1] = action: "netup", "netdown"
+ *      argv [2] = context identifier
+ *
+ *      envp [*] = type-specific parameters
+ */
+
+long vs_net_change(struct nx_info *nxi, unsigned int cmd)
+{
+	char id_buf[8], cmd_buf[16];
+	char *argv[] = {vshelper_path, NULL, id_buf, 0};
+	char *envp[] = {"HOME=/", "TERM=linux",
+			"PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+
+	if (!nx_info_flags(nxi, NXF_SC_HELPER, 0))
+		return 0;
+
+	snprintf(id_buf, sizeof(id_buf)-1, "%d", nxi->nx_id);
+	snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
+
+	switch (cmd) {
+	case VSC_NETUP:
+		argv[1] = "netup";
+		break;
+	case VSC_NETDOWN:
+		argv[1] = "netdown";
+		break;
+	default:
+		return 0;
+	}
+
+	return do_vshelper(vshelper_path, argv, envp, 1);
+}
+
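
Editor's note, not part of the patch: the helpers above always invoke the userspace vshelper with argv[1] carrying the action, argv[2] the context id, and a VS_CMD variable (plus VS_UID/VS_PID in the reboot case) in the environment. A small standalone sketch of how those vectors are assembled in userspace C; the xid and the command word are arbitrary example values:

#include <stdio.h>

int main(void)
{
	char id_buf[8], cmd_buf[16];
	const char *argv[] = { "/sbin/vshelper", "shutdown", id_buf, NULL };
	const char *envp[] = { "HOME=/", "TERM=linux",
			       "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			       cmd_buf, NULL };
	int i;

	snprintf(id_buf, sizeof(id_buf), "%d", 42);		/* example xid */
	snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", 0x102);	/* example command word */

	for (i = 0; argv[i]; i++)
		printf("argv[%d] = %s\n", i, argv[i]);
	for (i = 0; envp[i]; i++)
		printf("envp[%d] = %s\n", i, envp[i]);
	return 0;
}
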
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/history.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/history.c
--- linux-2.6.32.17/kernel/vserver/history.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/history.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,258 @@
+/*
+ *  kernel/vserver/history.c
+ *
+ *  Virtual Context History Backtrace
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *  V0.02  hash/unhash and trace
+ *  V0.03  preemption fixes
+ *
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+#include <linux/vserver/context.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/debug_cmd.h>
+#include <linux/vserver/history.h>
+
+
+#ifdef	CONFIG_VSERVER_HISTORY
+#define VXH_SIZE	CONFIG_VSERVER_HISTORY_SIZE
+#else
+#define VXH_SIZE	64
+#endif
+
+struct _vx_history {
+	unsigned int counter;
+
+	struct _vx_hist_entry entry[VXH_SIZE + 1];
+};
+
+
+DEFINE_PER_CPU(struct _vx_history, vx_history_buffer);
+
+unsigned volatile int vxh_active = 1;
+
+static atomic_t sequence = ATOMIC_INIT(0);
+
+
+/*	vxh_advance()
+
+	* requires disabled preemption				*/
+
+struct _vx_hist_entry *vxh_advance(void *loc)
+{
+	unsigned int cpu = smp_processor_id();
+	struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
+	struct _vx_hist_entry *entry;
+	unsigned int index;
+
+	index = vxh_active ? (hist->counter++ % VXH_SIZE) : VXH_SIZE;
+	entry = &hist->entry[index];
+
+	entry->seq = atomic_inc_return(&sequence);
+	entry->loc = loc;
+	return entry;
+}
+
+EXPORT_SYMBOL_GPL(vxh_advance);
+
+
+#define VXH_LOC_FMTS	"(#%04x,*%d):%p"
+
+#define VXH_LOC_ARGS(e)	(e)->seq, cpu, (e)->loc
+
+
+#define VXH_VXI_FMTS	"%p[#%d,%d.%d]"
+
+#define VXH_VXI_ARGS(e)	(e)->vxi.ptr,				\
+			(e)->vxi.ptr ? (e)->vxi.xid : 0,	\
+			(e)->vxi.ptr ? (e)->vxi.usecnt : 0,	\
+			(e)->vxi.ptr ? (e)->vxi.tasks : 0
+
+void	vxh_dump_entry(struct _vx_hist_entry *e, unsigned cpu)
+{
+	switch (e->type) {
+	case VXH_THROW_OOPS:
+		printk( VXH_LOC_FMTS " oops \n", VXH_LOC_ARGS(e));
+		break;
+
+	case VXH_GET_VX_INFO:
+	case VXH_PUT_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_GET_VX_INFO) ? "get" : "put",
+			VXH_VXI_ARGS(e));
+		break;
+
+	case VXH_INIT_VX_INFO:
+	case VXH_SET_VX_INFO:
+	case VXH_CLR_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_INIT_VX_INFO) ? "init" :
+			((e->type == VXH_SET_VX_INFO) ? "set" : "clr"),
+			VXH_VXI_ARGS(e), e->sc.data);
+		break;
+
+	case VXH_CLAIM_VX_INFO:
+	case VXH_RELEASE_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_CLAIM_VX_INFO) ? "claim" : "release",
+			VXH_VXI_ARGS(e), e->sc.data);
+		break;
+
+	case VXH_ALLOC_VX_INFO:
+	case VXH_DEALLOC_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_ALLOC_VX_INFO) ? "alloc" : "dealloc",
+			VXH_VXI_ARGS(e));
+		break;
+
+	case VXH_HASH_VX_INFO:
+	case VXH_UNHASH_VX_INFO:
+		printk( VXH_LOC_FMTS " __%s_vx_info " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_HASH_VX_INFO) ? "hash" : "unhash",
+			VXH_VXI_ARGS(e));
+		break;
+
+	case VXH_LOC_VX_INFO:
+	case VXH_LOOKUP_VX_INFO:
+	case VXH_CREATE_VX_INFO:
+		printk( VXH_LOC_FMTS " __%s_vx_info [#%d] -> " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_CREATE_VX_INFO) ? "create" :
+			((e->type == VXH_LOC_VX_INFO) ? "loc" : "lookup"),
+			e->ll.arg, VXH_VXI_ARGS(e));
+		break;
+	}
+}
+
+static void __vxh_dump_history(void)
+{
+	unsigned int i, cpu;
+
+	printk("History:\tSEQ: %8x\tNR_CPUS: %d\n",
+		atomic_read(&sequence), NR_CPUS);
+
+	for (i = 0; i < VXH_SIZE; i++) {
+		for_each_online_cpu(cpu) {
+			struct _vx_history *hist =
+				&per_cpu(vx_history_buffer, cpu);
+			unsigned int index = (hist->counter - i) % VXH_SIZE;
+			struct _vx_hist_entry *entry = &hist->entry[index];
+
+			vxh_dump_entry(entry, cpu);
+		}
+	}
+}
+
+void	vxh_dump_history(void)
+{
+	vxh_active = 0;
+#ifdef CONFIG_SMP
+	local_irq_enable();
+	smp_send_stop();
+	local_irq_disable();
+#endif
+	__vxh_dump_history();
+}
+
+
+/* vserver syscall commands below here */
+
+
+int vc_dump_history(uint32_t id)
+{
+	vxh_active = 0;
+	__vxh_dump_history();
+	vxh_active = 1;
+
+	return 0;
+}
+
+
+int do_read_history(struct __user _vx_hist_entry *data,
+	int cpu, uint32_t *index, uint32_t *count)
+{
+	int pos, ret = 0;
+	struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
+	int end = hist->counter;
+	int start = end - VXH_SIZE + 2;
+	int idx = *index;
+
+	/* special case: get current pos */
+	if (!*count) {
+		*index = end;
+		return 0;
+	}
+
+	/* have we lost some data? */
+	if (idx < start)
+		idx = start;
+
+	for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
+		struct _vx_hist_entry *entry =
+			&hist->entry[idx % VXH_SIZE];
+
+		/* send entry to userspace */
+		ret = copy_to_user(&data[pos], entry, sizeof(*entry));
+		if (ret)
+			break;
+	}
+	/* save new index and count */
+	*index = idx;
+	*count = pos;
+	return ret ? ret : (*index < end);
+}
+
+int vc_read_history(uint32_t id, void __user *data)
+{
+	struct vcmd_read_history_v0 vc_data;
+	int ret;
+
+	if (id >= NR_CPUS)
+		return -EINVAL;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_read_history((struct __user _vx_hist_entry *)vc_data.data,
+		id, &vc_data.index, &vc_data.count);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_read_history_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_read_history_v0_x32 vc_data;
+	int ret;
+
+	if (id >= NR_CPUS)
+		return -EINVAL;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_read_history((struct __user _vx_hist_entry *)
+		compat_ptr(vc_data.data_ptr),
+		id, &vc_data.index, &vc_data.count);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+#endif	/* CONFIG_COMPAT */
+
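
Editor's note, not part of the patch: do_read_history() above reads from a per-CPU ring indexed by a monotonically increasing counter. The window of readable entries ends at the counter, starts VXH_SIZE - 2 entries earlier (a small safety margin against concurrent writers), and a reader that fell behind is clamped forward before copying. A standalone sketch of that wrap-around read in userspace C; the ring size and values are local to the example:

#include <stdio.h>

#define SIZE 8

int main(void)
{
	int ring[SIZE];
	int counter = 23;		/* 23 entries written so far */
	int idx = 10;			/* reader position, already overwritten */
	int end = counter;
	int start = end - SIZE + 2;	/* oldest index the reader may still use */
	int i;

	for (i = 0; i < counter; i++)	/* writer: newer entries overwrite older ones */
		ring[i % SIZE] = i;

	if (idx < start)		/* reader lost some data: skip forward */
		idx = start;

	for (; idx < end; idx++)
		printf("entry %d = %d\n", idx, ring[idx % SIZE]);
	return 0;
}
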
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/inet.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/inet.c
--- linux-2.6.32.17/kernel/vserver/inet.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/inet.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,225 @@
+
+#include <linux/in.h>
+#include <linux/inetdevice.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
+#include <linux/vserver/debug.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+
+
+int nx_v4_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
+{
+	int ret = 0;
+
+	if (!nxi1 || !nxi2 || nxi1 == nxi2)
+		ret = 1;
+	else {
+		struct nx_addr_v4 *ptr;
+
+		for (ptr = &nxi1->v4; ptr; ptr = ptr->next) {
+			if (v4_nx_addr_in_nx_info(nxi2, ptr, -1)) {
+				ret = 1;
+				break;
+			}
+		}
+	}
+
+	vxdprintk(VXD_CBIT(net, 2),
+		"nx_v4_addr_conflict(%p,%p): %d",
+		nxi1, nxi2, ret);
+
+	return ret;
+}
+
+
+#ifdef	CONFIG_IPV6
+
+int nx_v6_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
+{
+	int ret = 0;
+
+	if (!nxi1 || !nxi2 || nxi1 == nxi2)
+		ret = 1;
+	else {
+		struct nx_addr_v6 *ptr;
+
+		for (ptr = &nxi1->v6; ptr; ptr = ptr->next) {
+			if (v6_nx_addr_in_nx_info(nxi2, ptr, -1)) {
+				ret = 1;
+				break;
+			}
+		}
+	}
+
+	vxdprintk(VXD_CBIT(net, 2),
+		"nx_v6_addr_conflict(%p,%p): %d",
+		nxi1, nxi2, ret);
+
+	return ret;
+}
+
+#endif
+
+int v4_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+	struct in_device *in_dev;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	int ret = 0;
+
+	if (!dev)
+		goto out;
+	in_dev = in_dev_get(dev);
+	if (!in_dev)
+		goto out;
+
+	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+		ifap = &ifa->ifa_next) {
+		if (v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW)) {
+			ret = 1;
+			break;
+		}
+	}
+	in_dev_put(in_dev);
+out:
+	return ret;
+}
+
+
+#ifdef	CONFIG_IPV6
+
+int v6_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+	struct inet6_dev *in_dev;
+	struct inet6_ifaddr **ifap;
+	struct inet6_ifaddr *ifa;
+	int ret = 0;
+
+	if (!dev)
+		goto out;
+	in_dev = in6_dev_get(dev);
+	if (!in_dev)
+		goto out;
+
+	for (ifap = &in_dev->addr_list; (ifa = *ifap) != NULL;
+		ifap = &ifa->if_next) {
+		if (v6_addr_in_nx_info(nxi, &ifa->addr, -1)) {
+			ret = 1;
+			break;
+		}
+	}
+	in6_dev_put(in_dev);
+out:
+	return ret;
+}
+
+#endif
+
+int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+	int ret = 1;
+
+	if (!nxi)
+		goto out;
+	if (nxi->v4.type && v4_dev_in_nx_info(dev, nxi))
+		goto out;
+#ifdef	CONFIG_IPV6
+	ret = 2;
+	if (nxi->v6.type && v6_dev_in_nx_info(dev, nxi))
+		goto out;
+#endif
+	ret = 0;
+out:
+	vxdprintk(VXD_CBIT(net, 3),
+		"dev_in_nx_info(%p,%p[#%d]) = %d",
+		dev, nxi, nxi ? nxi->nx_id : 0, ret);
+	return ret;
+}
+
+int ip_v4_find_src(struct net *net, struct nx_info *nxi,
+	struct rtable **rp, struct flowi *fl)
+{
+	if (!nxi)
+		return 0;
+
+	/* FIXME: handle lback only case */
+	if (!NX_IPV4(nxi))
+		return -EPERM;
+
+	vxdprintk(VXD_CBIT(net, 4),
+		"ip_v4_find_src(%p[#%u]) " NIPQUAD_FMT " -> " NIPQUAD_FMT,
+		nxi, nxi ? nxi->nx_id : 0,
+		NIPQUAD(fl->fl4_src), NIPQUAD(fl->fl4_dst));
+
+	/* single IP is unconditional */
+	if (nx_info_flags(nxi, NXF_SINGLE_IP, 0) &&
+		(fl->fl4_src == INADDR_ANY))
+		fl->fl4_src = nxi->v4.ip[0].s_addr;
+
+	if (fl->fl4_src == INADDR_ANY) {
+		struct nx_addr_v4 *ptr;
+		__be32 found = 0;
+		int err;
+
+		err = __ip_route_output_key(net, rp, fl);
+		if (!err) {
+			found = (*rp)->rt_src;
+			ip_rt_put(*rp);
+			vxdprintk(VXD_CBIT(net, 4),
+				"ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
+				nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(found));
+			if (v4_addr_in_nx_info(nxi, found, NXA_MASK_BIND))
+				goto found;
+		}
+
+		for (ptr = &nxi->v4; ptr; ptr = ptr->next) {
+			__be32 primary = ptr->ip[0].s_addr;
+			__be32 mask = ptr->mask.s_addr;
+			__be32 neta = primary & mask;
+
+			vxdprintk(VXD_CBIT(net, 4), "ip_v4_find_src(%p[#%u]) chk: "
+				NIPQUAD_FMT "/" NIPQUAD_FMT "/" NIPQUAD_FMT,
+				nxi, nxi ? nxi->nx_id : 0, NIPQUAD(primary),
+				NIPQUAD(mask), NIPQUAD(neta));
+			if ((found & mask) != neta)
+				continue;
+
+			fl->fl4_src = primary;
+			err = __ip_route_output_key(net, rp, fl);
+			vxdprintk(VXD_CBIT(net, 4),
+				"ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
+				nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(primary));
+			if (!err) {
+				found = (*rp)->rt_src;
+				ip_rt_put(*rp);
+				if (found == primary)
+					goto found;
+			}
+		}
+		/* still no source ip? */
+		found = ipv4_is_loopback(fl->fl4_dst)
+			? IPI_LOOPBACK : nxi->v4.ip[0].s_addr;
+	found:
+		/* assign src ip to flow */
+		fl->fl4_src = found;
+
+	} else {
+		if (!v4_addr_in_nx_info(nxi, fl->fl4_src, NXA_MASK_BIND))
+			return -EPERM;
+	}
+
+	if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) {
+		if (ipv4_is_loopback(fl->fl4_dst))
+			fl->fl4_dst = nxi->v4_lback.s_addr;
+		if (ipv4_is_loopback(fl->fl4_src))
+			fl->fl4_src = nxi->v4_lback.s_addr;
+	} else if (ipv4_is_loopback(fl->fl4_dst) &&
+		!nx_info_flags(nxi, NXF_LBACK_ALLOW, 0))
+		return -EPERM;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(ip_v4_find_src);
+
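
When no source address is set, ip_v4_find_src() above first asks the routing code for a preferred source and, if that address does not belong to the context, walks the guest's address list looking for an entry on the same subnet: primary & mask is the guest network and (found & mask) != neta rejects candidates from other networks. A tiny stand-alone sketch of just that subnet test (same_subnet() is a hypothetical helper; user-space inet_addr() is used for readability, while the kernel works on __be32 values directly, but the bitwise test is byte-order independent):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	/* does candidate lie in the same IPv4 subnet as primary/mask? */
	static int same_subnet(uint32_t candidate, uint32_t primary, uint32_t mask)
	{
		return (candidate & mask) == (primary & mask);
	}

	int main(void)
	{
		uint32_t primary = inet_addr("192.168.1.10");
		uint32_t mask    = inet_addr("255.255.255.0");

		printf("%d\n", same_subnet(inet_addr("192.168.1.99"), primary, mask)); /* 1 */
		printf("%d\n", same_subnet(inet_addr("10.0.0.1"),     primary, mask)); /* 0 */
		return 0;
	}

Only when a subnet matches does the kernel code retry the route lookup with that guest address as the candidate source.
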
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/init.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/init.c
--- linux-2.6.32.17/kernel/vserver/init.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/init.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,45 @@
+/*
+ *  linux/kernel/vserver/init.c
+ *
+ *  Virtual Server Init
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *
+ */
+
+#include <linux/init.h>
+
+int	vserver_register_sysctl(void);
+void	vserver_unregister_sysctl(void);
+
+
+static int __init init_vserver(void)
+{
+	int ret = 0;
+
+#ifdef	CONFIG_VSERVER_DEBUG
+	vserver_register_sysctl();
+#endif
+	return ret;
+}
+
+
+static void __exit exit_vserver(void)
+{
+
+#ifdef	CONFIG_VSERVER_DEBUG
+	vserver_unregister_sysctl();
+#endif
+	return;
+}
+
+/* FIXME: GFP_ZONETYPES gone
+long vx_slab[GFP_ZONETYPES]; */
+long vx_area;
+
+
+module_init(init_vserver);
+module_exit(exit_vserver);
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/inode.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/inode.c
--- linux-2.6.32.17/kernel/vserver/inode.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/inode.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,433 @@
+/*
+ *  linux/kernel/vserver/inode.c
+ *
+ *  Virtual Server: File System Support
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  separated from vcontext V0.05
+ *  V0.02  moved to tag (instead of xid)
+ *
+ */
+
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/devpts_fs.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/parser.h>
+#include <linux/namei.h>
+#include <linux/vserver/inode.h>
+#include <linux/vserver/inode_cmd.h>
+#include <linux/vs_base.h>
+#include <linux/vs_tag.h>
+
+#include <asm/uaccess.h>
+
+
+static int __vc_get_iattr(struct inode *in, uint32_t *tag, uint32_t *flags, uint32_t *mask)
+{
+	struct proc_dir_entry *entry;
+
+	if (!in || !in->i_sb)
+		return -ESRCH;
+
+	*flags = IATTR_TAG
+		| (IS_IMMUTABLE(in) ? IATTR_IMMUTABLE : 0)
+		| (IS_IXUNLINK(in) ? IATTR_IXUNLINK : 0)
+		| (IS_BARRIER(in) ? IATTR_BARRIER : 0)
+		| (IS_COW(in) ? IATTR_COW : 0);
+	*mask = IATTR_IXUNLINK | IATTR_IMMUTABLE | IATTR_COW;
+
+	if (S_ISDIR(in->i_mode))
+		*mask |= IATTR_BARRIER;
+
+	if (IS_TAGGED(in)) {
+		*tag = in->i_tag;
+		*mask |= IATTR_TAG;
+	}
+
+	switch (in->i_sb->s_magic) {
+	case PROC_SUPER_MAGIC:
+		entry = PROC_I(in)->pde;
+
+		/* check for specific inodes? */
+		if (entry)
+			*mask |= IATTR_FLAGS;
+		if (entry)
+			*flags |= (entry->vx_flags & IATTR_FLAGS);
+		else
+			*flags |= (PROC_I(in)->vx_flags & IATTR_FLAGS);
+		break;
+
+	case DEVPTS_SUPER_MAGIC:
+		*tag = in->i_tag;
+		*mask |= IATTR_TAG;
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+int vc_get_iattr(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1 vc_data = { .tag = -1 };
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(vc_data.name, &path);
+	if (!ret) {
+		ret = __vc_get_iattr(path.dentry->d_inode,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_get_iattr_x32(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1_x32 vc_data = { .tag = -1 };
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
+	if (!ret) {
+		ret = __vc_get_iattr(path.dentry->d_inode,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+int vc_fget_iattr(uint32_t fd, void __user *data)
+{
+	struct file *filp;
+	struct vcmd_ctx_fiattr_v0 vc_data = { .tag = -1 };
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	filp = fget(fd);
+	if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
+		return -EBADF;
+
+	ret = __vc_get_iattr(filp->f_dentry->d_inode,
+		&vc_data.tag, &vc_data.flags, &vc_data.mask);
+
+	fput(filp);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+
+static int __vc_set_iattr(struct dentry *de, uint32_t *tag, uint32_t *flags, uint32_t *mask)
+{
+	struct inode *in = de->d_inode;
+	int error = 0, is_proc = 0, has_tag = 0;
+	struct iattr attr = { 0 };
+
+	if (!in || !in->i_sb)
+		return -ESRCH;
+
+	is_proc = (in->i_sb->s_magic == PROC_SUPER_MAGIC);
+	if ((*mask & IATTR_FLAGS) && !is_proc)
+		return -EINVAL;
+
+	has_tag = IS_TAGGED(in) ||
+		(in->i_sb->s_magic == DEVPTS_SUPER_MAGIC);
+	if ((*mask & IATTR_TAG) && !has_tag)
+		return -EINVAL;
+
+	mutex_lock(&in->i_mutex);
+	if (*mask & IATTR_TAG) {
+		attr.ia_tag = *tag;
+		attr.ia_valid |= ATTR_TAG;
+	}
+
+	if (*mask & IATTR_FLAGS) {
+		struct proc_dir_entry *entry = PROC_I(in)->pde;
+		unsigned int iflags = PROC_I(in)->vx_flags;
+
+		iflags = (iflags & ~(*mask & IATTR_FLAGS))
+			| (*flags & IATTR_FLAGS);
+		PROC_I(in)->vx_flags = iflags;
+		if (entry)
+			entry->vx_flags = iflags;
+	}
+
+	if (*mask & (IATTR_IMMUTABLE | IATTR_IXUNLINK |
+		IATTR_BARRIER | IATTR_COW)) {
+		int iflags = in->i_flags;
+		int vflags = in->i_vflags;
+
+		if (*mask & IATTR_IMMUTABLE) {
+			if (*flags & IATTR_IMMUTABLE)
+				iflags |= S_IMMUTABLE;
+			else
+				iflags &= ~S_IMMUTABLE;
+		}
+		if (*mask & IATTR_IXUNLINK) {
+			if (*flags & IATTR_IXUNLINK)
+				iflags |= S_IXUNLINK;
+			else
+				iflags &= ~S_IXUNLINK;
+		}
+		if (S_ISDIR(in->i_mode) && (*mask & IATTR_BARRIER)) {
+			if (*flags & IATTR_BARRIER)
+				vflags |= V_BARRIER;
+			else
+				vflags &= ~V_BARRIER;
+		}
+		if (S_ISREG(in->i_mode) && (*mask & IATTR_COW)) {
+			if (*flags & IATTR_COW)
+				vflags |= V_COW;
+			else
+				vflags &= ~V_COW;
+		}
+		if (in->i_op && in->i_op->sync_flags) {
+			error = in->i_op->sync_flags(in, iflags, vflags);
+			if (error)
+				goto out;
+		}
+	}
+
+	if (attr.ia_valid) {
+		if (in->i_op && in->i_op->setattr)
+			error = in->i_op->setattr(de, &attr);
+		else {
+			error = inode_change_ok(in, &attr);
+			if (!error)
+				error = inode_setattr(in, &attr);
+		}
+	}
+
+out:
+	mutex_unlock(&in->i_mutex);
+	return error;
+}
+
+int vc_set_iattr(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1 vc_data;
+	int ret;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(vc_data.name, &path);
+	if (!ret) {
+		ret = __vc_set_iattr(path.dentry,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_set_iattr_x32(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1_x32 vc_data;
+	int ret;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
+	if (!ret) {
+		ret = __vc_set_iattr(path.dentry,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#endif	/* CONFIG_COMPAT */
+
+int vc_fset_iattr(uint32_t fd, void __user *data)
+{
+	struct file *filp;
+	struct vcmd_ctx_fiattr_v0 vc_data;
+	int ret;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	filp = fget(fd);
+	if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
+		return -EBADF;
+
+	ret = __vc_set_iattr(filp->f_dentry, &vc_data.tag,
+		&vc_data.flags, &vc_data.mask);
+
+	fput(filp);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+
+enum { Opt_notagcheck, Opt_tag, Opt_notag, Opt_tagid, Opt_err };
+
+static match_table_t tokens = {
+	{Opt_notagcheck, "notagcheck"},
+#ifdef	CONFIG_PROPAGATE
+	{Opt_notag, "notag"},
+	{Opt_tag, "tag"},
+	{Opt_tagid, "tagid=%u"},
+#endif
+	{Opt_err, NULL}
+};
+
+
+static void __dx_parse_remove(char *string, char *opt)
+{
+	char *p = strstr(string, opt);
+	char *q = p;
+
+	if (p) {
+		while (*q != '\0' && *q != ',')
+			q++;
+		while (*q)
+			*p++ = *q++;
+		while (*p)
+			*p++ = '\0';
+	}
+}
+
+int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags,
+		 unsigned long *flags)
+{
+	int set = 0;
+	substring_t args[MAX_OPT_ARGS];
+	int token, option = 0;
+	char *s, *p, *opts;
+
+	if (!string)
+		return 0;
+	s = kstrdup(string, GFP_KERNEL | GFP_ATOMIC);
+	if (!s)
+		return 0;
+
+	opts = s;
+	while ((p = strsep(&opts, ",")) != NULL) {
+		token = match_token(p, tokens, args);
+
+		vxdprintk(VXD_CBIT(tag, 7),
+			"dx_parse_tag(»%s«): %d:#%d",
+			p, token, option);
+
+		switch (token) {
+#ifdef CONFIG_PROPAGATE
+		case Opt_tag:
+			if (tag)
+				*tag = 0;
+			if (remove)
+				__dx_parse_remove(s, "tag");
+			*mnt_flags |= MNT_TAGID;
+			set |= MNT_TAGID;
+			break;
+		case Opt_notag:
+			if (remove)
+				__dx_parse_remove(s, "notag");
+			*mnt_flags |= MNT_NOTAG;
+			set |= MNT_NOTAG;
+			break;
+		case Opt_tagid:
+			if (tag && !match_int(args, &option))
+				*tag = option;
+			if (remove)
+				__dx_parse_remove(s, "tagid");
+			*mnt_flags |= MNT_TAGID;
+			set |= MNT_TAGID;
+			break;
+#endif
+		case Opt_notagcheck:
+			if (remove)
+				__dx_parse_remove(s, "notagcheck");
+			*flags |= MS_NOTAGCHECK;
+			set |= MS_NOTAGCHECK;
+			break;
+		}
+	}
+	if (set)
+		strcpy(string, s);
+	kfree(s);
+	return set;
+}
+
+#ifdef	CONFIG_PROPAGATE
+
+void __dx_propagate_tag(struct nameidata *nd, struct inode *inode)
+{
+	tag_t new_tag = 0;
+	struct vfsmount *mnt;
+	int propagate;
+
+	if (!nd)
+		return;
+	mnt = nd->path.mnt;
+	if (!mnt)
+		return;
+
+	propagate = (mnt->mnt_flags & MNT_TAGID);
+	if (propagate)
+		new_tag = mnt->mnt_tag;
+
+	vxdprintk(VXD_CBIT(tag, 7),
+		"dx_propagate_tag(%p[#%lu.%d]): %d,%d",
+		inode, inode->i_ino, inode->i_tag,
+		new_tag, (propagate) ? 1 : 0);
+
+	if (propagate)
+		inode->i_tag = new_tag;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(__dx_propagate_tag);
+
+#endif	/* CONFIG_PROPAGATE */
+
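
dx_parse_tag() above tokenizes the mount option string with strsep()/match_token() and, when remove is set, strips the vserver-specific options in place so the remaining string can be handed to the filesystem untouched. Below is a user-space sketch of that strip step, mirroring __dx_parse_remove() under the same simplifications (it matches by substring and leaves the separating comma behind, just like the kernel helper; strip_option() is a hypothetical name):

	#include <stdio.h>
	#include <string.h>

	/* remove the option starting at "opt" up to the next ',' (or end of string) */
	static void strip_option(char *string, const char *opt)
	{
		char *p = strstr(string, opt);
		char *q = p;

		if (!p)
			return;
		while (*q != '\0' && *q != ',')
			q++;
		while (*q)			/* shift the tail left over the option */
			*p++ = *q++;
		while (*p)			/* clear the now unused suffix */
			*p++ = '\0';
	}

	int main(void)
	{
		char opts[] = "noatime,tagid=42,ro";

		strip_option(opts, "tagid");
		printf("%s\n", opts);		/* prints "noatime,,ro" */
		return 0;
	}

The resulting double comma is left in place, matching the kernel helper's behaviour.
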
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/Kconfig
--- linux-2.6.32.17/kernel/vserver/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/Kconfig	2010-02-14 01:02:13.000000000 +0100
@@ -0,0 +1,251 @@
+#
+# Linux VServer configuration
+#
+
+menu "Linux VServer"
+
+config	VSERVER_AUTO_LBACK
+	bool    "Automatically Assign Loopback IP"
+	default y
+	help
+	  Automatically assign a guest-specific loopback
+	  IP and add it to the kernel network stack on
+	  startup.
+
+config	VSERVER_AUTO_SINGLE
+	bool	"Automatic Single IP Special Casing"
+	depends on EXPERIMENTAL
+	default y
+	help
+	  This allows network contexts with a single IP to
+	  automatically remap 0.0.0.0 bindings to that IP,
+	  avoiding further network checks and improving
+	  performance.
+
+	  (note: such guests do not allow changing the IP
+	   on the fly and do not show loopback addresses)
+
+config	VSERVER_COWBL
+	bool	"Enable COW Immutable Link Breaking"
+	default y
+	help
+	  This enables the COW (Copy-On-Write) link break code.
+	  It allows you to treat unified files like normal files
+	  when writing to them (which will implicitly break the
+	  link and create a copy of the unified file).
+
+config	VSERVER_VTIME
+	bool	"Enable Virtualized Guest Time"
+	depends on EXPERIMENTAL
+	default n
+	help
+	  This enables per-guest time offsets to allow for
+	  adjusting the system clock individually per guest.
+	  This adds some overhead to the time functions and
+	  therefore should not be enabled without good reason.
+
+config	VSERVER_DEVICE
+	bool	"Enable Guest Device Mapping"
+	depends on EXPERIMENTAL
+	default n
+	help
+	  This enables generic device remapping.
+
+config	VSERVER_PROC_SECURE
+	bool	"Enable Proc Security"
+	depends on PROC_FS
+	default y
+	help
+	  This configures ProcFS security to initially hide
+	  non-process entries for all contexts except the main and
+	  spectator context (i.e. for all guests), which is a secure
+	  default.
+
+	  (note: on 1.2x the entries were visible by default)
+
+config	VSERVER_HARDCPU
+	bool	"Enable Hard CPU Limits"
+	default y
+	help
+	  Activate the Hard CPU Limits
+
+	  This will compile in code that allows the Token Bucket
+	  Scheduler to put processes on hold when a context's
+	  tokens are depleted (provided that its per-context
+	  sched_hard flag is set).
+
+	  Processes belonging to that context will not be able
+	  to consume CPU resources again until a per-context
+	  configured minimum of tokens has been reached.
+
+config	VSERVER_IDLETIME
+	bool	"Avoid idle CPUs by skipping Time"
+	depends on VSERVER_HARDCPU
+	default y
+	help
+	  This option allows the scheduler to artificially
+	  advance time (per CPU) when otherwise the idle
+	  task would be scheduled, thus keeping the CPU
+	  busy and sharing the available resources among
+	  certain contexts.
+
+config	VSERVER_IDLELIMIT
+	bool	"Limit the IDLE task"
+	depends on VSERVER_HARDCPU
+	default n
+	help
+	  Limit the idle slices, so that the next context
+	  will be scheduled as soon as possible.
+
+	  This might improve interactivity and latency, but
+	  will also marginally increase scheduling overhead.
+
+choice
+	prompt	"Persistent Inode Tagging"
+	default	TAGGING_ID24
+	help
+	  This adds persistent context information to filesystems
+	  mounted with the tagxid option. Tagging is a requirement
+	  for per-context disk limits and per-context quota.
+
+
+config	TAGGING_NONE
+	bool	"Disabled"
+	help
+	  Do not store per-context information in inodes.
+
+config	TAGGING_UID16
+	bool	"UID16/GID32"
+	help
+	  Reduces the UID to 16 bits, but leaves the GID at 32 bits.
+
+config	TAGGING_GID16
+	bool	"UID32/GID16"
+	help
+	  Reduces the GID to 16 bits, but leaves the UID at 32 bits.
+
+config	TAGGING_ID24
+	bool	"UID24/GID24"
+	help
+	  Uses the upper 8 bits of UID and GID for XID tagging,
+	  which leaves 24 bits for each of UID and GID and should
+	  be more than sufficient for normal use.
+
+config	TAGGING_INTERN
+	bool	"UID32/GID32"
+	help
+	  This uses otherwise reserved inode fields in the on-disk
+	  representation, which limits its use to a few
+	  filesystems (currently ext2 and ext3).
+
+endchoice
+
+config	TAG_NFSD
+	bool	"Tag NFSD User Auth and Files"
+	default n
+	help
+	  Enable this if you want the in-kernel NFS
+	  server to use the tagging specified above.
+	  (This will also require patched clients.)
+
+config	VSERVER_PRIVACY
+	bool	"Honor Privacy Aspects of Guests"
+	default n
+	help
+	  When enabled, most context checks will disallow
+	  access to structures assigned to a specific context,
+	  like ptys or loop devices.
+
+config	VSERVER_CONTEXTS
+	int	"Maximum number of Contexts (1-65533)"	if EMBEDDED
+	range 1 65533
+	default "768"	if 64BIT
+	default "256"
+	help
+	  This setting will optimize certain data structures
+	  and memory allocations according to the expected
+	  maximum.
+
+	  Note: this is not a strict upper limit.
+
+config	VSERVER_WARN
+	bool	"VServer Warnings"
+	default y
+	help
+	  This enables various runtime warnings, which will
+	  notify you about potential manipulation attempts or
+	  resource shortages. It is generally considered a
+	  good idea to have this enabled.
+
+config	VSERVER_DEBUG
+	bool	"VServer Debugging Code"
+	default n
+	help
+	  Set this to yes if you want to be able to activate
+	  debugging output at runtime. It adds a very small
+	  overhead to all vserver-related functions and
+	  increases the kernel size by about 20k.
+
+config	VSERVER_HISTORY
+	bool	"VServer History Tracing"
+	depends on VSERVER_DEBUG
+	default n
+	help
+	  Set this to yes if you want to record the history of
+	  linux-vserver activities, so they can be replayed in
+	  the event of a kernel panic or oops.
+
+config	VSERVER_HISTORY_SIZE
+	int	"Per-CPU History Size (32-65536)"
+	depends on VSERVER_HISTORY
+	range 32 65536
+	default 64
+	help
+	  This allows you to specify the number of entries in
+	  the per-CPU history buffer.
+
+config	VSERVER_MONITOR
+	bool	"VServer Scheduling Monitor"
+	depends on VSERVER_DISABLED
+	default n
+	help
+	  Set this to yes if you want to record the scheduling
+	  decisions, so that they can be relayed to userspace
+	  for detailed analysis.
+
+config	VSERVER_MONITOR_SIZE
+	int	"Per-CPU Monitor Queue Size (32-65536)"
+	depends on VSERVER_MONITOR
+	range 32 65536
+	default 1024
+	help
+	  This allows you to specify the number of entries in
+	  the per-CPU scheduling monitor buffer.
+
+config	VSERVER_MONITOR_SYNC
+	int	"Per-CPU Monitor Sync Interval (0-65536)"
+	depends on VSERVER_MONITOR
+	range 0 65536
+	default 256
+	help
+	  This allows you to specify the interval, in ticks,
+	  at which a time sync entry is inserted.
+
+endmenu
+
+
+config	VSERVER
+	bool
+	default y
+	select NAMESPACES
+	select UTS_NS
+	select IPC_NS
+	select USER_NS
+	select SYSVIPC
+
+config	VSERVER_SECURITY
+	bool
+	depends on SECURITY
+	default y
+	select SECURITY_CAPABILITIES
+
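
The VSERVER_HARDCPU help text above refers to the token bucket scheduler: each context owns a bucket that is refilled at a fixed rate, every tick of CPU consumed drains one token, an empty bucket puts the context on hold, and it may run again once a configured minimum has accumulated. The sketch below is only the textbook token-bucket idea that the help text describes, with made-up field names and numbers, not the kernel scheduler code:

	#include <stdio.h>

	struct token_bucket {
		int tokens;		/* current fill */
		int maximum;		/* bucket size */
		int fill_rate;		/* tokens added per interval */
		int interval;		/* ticks per refill */
		int minimum;		/* tokens needed to run again after depletion */
		int on_hold;		/* context currently descheduled? */
	};

	static void tb_tick(struct token_bucket *tb, int ticks_ran, int ticks_elapsed)
	{
		tb->tokens += (ticks_elapsed / tb->interval) * tb->fill_rate;
		if (tb->tokens > tb->maximum)
			tb->tokens = tb->maximum;

		tb->tokens -= ticks_ran;
		if (tb->tokens <= 0) {
			tb->tokens = 0;
			tb->on_hold = 1;	/* sched_hard: put processes on hold */
		} else if (tb->on_hold && tb->tokens >= tb->minimum) {
			tb->on_hold = 0;	/* enough tokens again, let it run */
		}
	}

	int main(void)
	{
		struct token_bucket tb = { 100, 100, 5, 10, 20, 0 };
		int t;

		for (t = 0; t < 30; t++)
			tb_tick(&tb, tb.on_hold ? 0 : 10, 10);	/* 10 ticks used per interval when running */
		printf("tokens=%d on_hold=%d\n", tb.tokens, tb.on_hold);
		return 0;
	}

With these (arbitrary) numbers the context alternates between running and being held, which is the throttling behaviour the option enables.
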
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/limit.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/limit.c
--- linux-2.6.32.17/kernel/vserver/limit.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/limit.c	2010-04-05 21:24:21.000000000 +0200
@@ -0,0 +1,392 @@
+/*
+ *  linux/kernel/vserver/limit.c
+ *
+ *  Virtual Server: Context Limits
+ *
+ *  Copyright (C) 2004-2010  Herbert Pötzl
+ *
+ *  V0.01  broken out from vcontext V0.05
+ *  V0.02  changed vcmds to vxi arg
+ *  V0.03  added memory cgroup support
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
+#include <linux/vs_limit.h>
+#include <linux/vserver/limit.h>
+#include <linux/vserver/limit_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+const char *vlimit_name[NUM_LIMITS] = {
+	[RLIMIT_CPU]		= "CPU",
+	[RLIMIT_RSS]		= "RSS",
+	[RLIMIT_NPROC]		= "NPROC",
+	[RLIMIT_NOFILE]		= "NOFILE",
+	[RLIMIT_MEMLOCK]	= "VML",
+	[RLIMIT_AS]		= "VM",
+	[RLIMIT_LOCKS]		= "LOCKS",
+	[RLIMIT_SIGPENDING]	= "SIGP",
+	[RLIMIT_MSGQUEUE]	= "MSGQ",
+
+	[VLIMIT_NSOCK]		= "NSOCK",
+	[VLIMIT_OPENFD]		= "OPENFD",
+	[VLIMIT_ANON]		= "ANON",
+	[VLIMIT_SHMEM]		= "SHMEM",
+	[VLIMIT_DENTRY]		= "DENTRY",
+};
+
+EXPORT_SYMBOL_GPL(vlimit_name);
+
+#define MASK_ENTRY(x)	(1 << (x))
+
+const struct vcmd_ctx_rlimit_mask_v0 vlimit_mask = {
+		/* minimum */
+	0
+	,	/* softlimit */
+	MASK_ENTRY( RLIMIT_RSS		) |
+	MASK_ENTRY( VLIMIT_ANON		) |
+	0
+	,       /* maximum */
+	MASK_ENTRY( RLIMIT_RSS		) |
+	MASK_ENTRY( RLIMIT_NPROC	) |
+	MASK_ENTRY( RLIMIT_NOFILE	) |
+	MASK_ENTRY( RLIMIT_MEMLOCK	) |
+	MASK_ENTRY( RLIMIT_AS		) |
+	MASK_ENTRY( RLIMIT_LOCKS	) |
+	MASK_ENTRY( RLIMIT_MSGQUEUE	) |
+
+	MASK_ENTRY( VLIMIT_NSOCK	) |
+	MASK_ENTRY( VLIMIT_OPENFD	) |
+	MASK_ENTRY( VLIMIT_ANON		) |
+	MASK_ENTRY( VLIMIT_SHMEM	) |
+	MASK_ENTRY( VLIMIT_DENTRY	) |
+	0
+};
+		/* accounting only */
+uint32_t account_mask =
+	MASK_ENTRY( VLIMIT_SEMARY	) |
+	MASK_ENTRY( VLIMIT_NSEMS	) |
+	MASK_ENTRY( VLIMIT_MAPPED	) |
+	0;
+
+
+static int is_valid_vlimit(int id)
+{
+	uint32_t mask = vlimit_mask.minimum |
+		vlimit_mask.softlimit | vlimit_mask.maximum;
+	return mask & (1 << id);
+}
+
+static int is_accounted_vlimit(int id)
+{
+	if (is_valid_vlimit(id))
+		return 1;
+	return account_mask & (1 << id);
+}
+
+
+static inline uint64_t vc_get_soft(struct vx_info *vxi, int id)
+{
+	rlim_t limit = __rlim_soft(&vxi->limit, id);
+	return VX_VLIM(limit);
+}
+
+static inline uint64_t vc_get_hard(struct vx_info *vxi, int id)
+{
+	rlim_t limit = __rlim_hard(&vxi->limit, id);
+	return VX_VLIM(limit);
+}
+
+static int do_get_rlimit(struct vx_info *vxi, uint32_t id,
+	uint64_t *minimum, uint64_t *softlimit, uint64_t *maximum)
+{
+	if (!is_valid_vlimit(id))
+		return -EINVAL;
+
+	if (minimum)
+		*minimum = CRLIM_UNSET;
+	if (softlimit)
+		*softlimit = vc_get_soft(vxi, id);
+	if (maximum)
+		*maximum = vc_get_hard(vxi, id);
+	return 0;
+}
+
+int vc_get_rlimit(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_rlimit(vxi, vc_data.id,
+		&vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+static int do_set_rlimit(struct vx_info *vxi, uint32_t id,
+	uint64_t minimum, uint64_t softlimit, uint64_t maximum)
+{
+	if (!is_valid_vlimit(id))
+		return -EINVAL;
+
+	if (maximum != CRLIM_KEEP)
+		__rlim_hard(&vxi->limit, id) = VX_RLIM(maximum);
+	if (softlimit != CRLIM_KEEP)
+		__rlim_soft(&vxi->limit, id) = VX_RLIM(softlimit);
+
+	/* clamp soft limit */
+	if (__rlim_soft(&vxi->limit, id) > __rlim_hard(&vxi->limit, id))
+		__rlim_soft(&vxi->limit, id) = __rlim_hard(&vxi->limit, id);
+
+	return 0;
+}
+
+int vc_set_rlimit(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_rlimit(vxi, vc_data.id,
+		vc_data.minimum, vc_data.softlimit, vc_data.maximum);
+}
+
+#ifdef	CONFIG_IA32_EMULATION
+
+int vc_set_rlimit_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_rlimit(vxi, vc_data.id,
+		vc_data.minimum, vc_data.softlimit, vc_data.maximum);
+}
+
+int vc_get_rlimit_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0_x32 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_rlimit(vxi, vc_data.id,
+		&vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+#endif	/* CONFIG_IA32_EMULATION */
+
+
+int vc_get_rlimit_mask(uint32_t id, void __user *data)
+{
+	if (copy_to_user(data, &vlimit_mask, sizeof(vlimit_mask)))
+		return -EFAULT;
+	return 0;
+}
+
+
+static inline void vx_reset_hits(struct _vx_limit *limit)
+{
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		atomic_set(&__rlim_lhit(limit, lim), 0);
+	}
+}
+
+int vc_reset_hits(struct vx_info *vxi, void __user *data)
+{
+	vx_reset_hits(&vxi->limit);
+	return 0;
+}
+
+static inline void vx_reset_minmax(struct _vx_limit *limit)
+{
+	rlim_t value;
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		value = __rlim_get(limit, lim);
+		__rlim_rmax(limit, lim) = value;
+		__rlim_rmin(limit, lim) = value;
+	}
+}
+
+int vc_reset_minmax(struct vx_info *vxi, void __user *data)
+{
+	vx_reset_minmax(&vxi->limit);
+	return 0;
+}
+
+
+int vc_rlimit_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_rlimit_stat_v0 vc_data;
+	struct _vx_limit *limit = &vxi->limit;
+	int id;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	id = vc_data.id;
+	if (!is_accounted_vlimit(id))
+		return -EINVAL;
+
+	vx_limit_fixup(limit, id);
+	vc_data.hits = atomic_read(&__rlim_lhit(limit, id));
+	vc_data.value = __rlim_get(limit, id);
+	vc_data.minimum = __rlim_rmin(limit, id);
+	vc_data.maximum = __rlim_rmax(limit, id);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+void vx_vsi_meminfo(struct sysinfo *val)
+{
+#ifdef	CONFIG_CGROUP_MEM_RES_CTLR
+	struct mem_cgroup *mcg = mem_cgroup_from_task(current);
+	u64 res_limit, res_usage;
+
+	if (!mcg)
+		return;
+
+	res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
+	res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
+
+	if (res_limit != RESOURCE_MAX)
+		val->totalram = (res_limit >> PAGE_SHIFT);
+	val->freeram = val->totalram - (res_usage >> PAGE_SHIFT);
+	val->bufferram = 0;
+#else	/* !CONFIG_CGROUP_MEM_RES_CTLR */
+	struct vx_info *vxi = current_vx_info();
+	unsigned long totalram, freeram;
+	rlim_t v;
+
+	/* we blindly accept the max */
+	v = __rlim_soft(&vxi->limit, RLIMIT_RSS);
+	totalram = (v != RLIM_INFINITY) ? v : val->totalram;
+
+	/* total minus used equals free */
+	v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
+	freeram = (v < totalram) ? totalram - v : 0;
+
+	val->totalram = totalram;
+	val->freeram = freeram;
+#endif	/* CONFIG_CGROUP_MEM_RES_CTLR */
+	val->totalhigh = 0;
+	val->freehigh = 0;
+	return;
+}
+
+void vx_vsi_swapinfo(struct sysinfo *val)
+{
+#ifdef	CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef	CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+	struct mem_cgroup *mcg = mem_cgroup_from_task(current);
+	u64 res_limit, res_usage, memsw_limit, memsw_usage;
+	s64 swap_limit, swap_usage;
+
+	if (!mcg)
+		return;
+
+	res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
+	res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
+	memsw_limit = mem_cgroup_memsw_read_u64(mcg, RES_LIMIT);
+	memsw_usage = mem_cgroup_memsw_read_u64(mcg, RES_USAGE);
+
+	if (res_limit == RESOURCE_MAX)
+		return;
+
+	swap_limit = memsw_limit - res_limit;
+	if (memsw_limit != RESOURCE_MAX)
+		val->totalswap = swap_limit >> PAGE_SHIFT;
+
+	swap_usage = memsw_usage - res_usage;
+	val->freeswap = (swap_usage < swap_limit) ?
+		val->totalswap - (swap_usage >> PAGE_SHIFT) : 0;
+#else	/* !CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
+	val->totalswap = 0;
+	val->freeswap = 0;
+#endif	/* !CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
+#else	/* !CONFIG_CGROUP_MEM_RES_CTLR */
+	struct vx_info *vxi = current_vx_info();
+	unsigned long totalswap, freeswap;
+	rlim_t v, w;
+
+	v = __rlim_soft(&vxi->limit, RLIMIT_RSS);
+	if (v == RLIM_INFINITY) {
+		val->freeswap = val->totalswap;
+		return;
+	}
+
+	/* we blindly accept the max */
+	w = __rlim_hard(&vxi->limit, RLIMIT_RSS);
+	totalswap = (w != RLIM_INFINITY) ? (w - v) : val->totalswap;
+
+	/* currently 'used' swap */
+	w = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
+	w -= (w > v) ? v : w;
+
+	/* total minus used equals free */
+	freeswap = (w < totalswap) ? totalswap - w : 0;
+
+	val->totalswap = totalswap;
+	val->freeswap = freeswap;
+#endif	/* CONFIG_CGROUP_MEM_RES_CTLR */
+	return;
+}
+
+long vx_vsi_cached(struct sysinfo *val)
+{
+#ifdef	CONFIG_CGROUP_MEM_RES_CTLR
+	struct mem_cgroup *mcg = mem_cgroup_from_task(current);
+
+	return mem_cgroup_stat_read_cache(mcg);
+#else
+	return 0;
+#endif
+}
+
+
+unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm)
+{
+	struct vx_info *vxi = mm->mm_vx_info;
+	unsigned long points;
+	rlim_t v, w;
+
+	if (!vxi)
+		return 0;
+
+	points = vxi->vx_badness_bias;
+
+	v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
+	w = __rlim_soft(&vxi->limit, RLIMIT_RSS);
+	points += (v > w) ? (v - w) : 0;
+
+	return points;
+}
+
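
In the non-cgroup branch, vx_vsi_meminfo() above virtualizes /proc/meminfo for a guest purely from its RSS limit and current RSS accounting: the soft limit (when set) becomes the guest's "total", and "free" is whatever part of that total is not yet used. The following toy sketch just replays that arithmetic with hypothetical page counts (fake_sysinfo, guest_meminfo() and RLIM_INF are made-up stand-ins, not kernel types):

	#include <stdio.h>

	#define RLIM_INF (~0UL)			/* stands in for RLIM_INFINITY */

	struct fake_sysinfo { unsigned long totalram, freeram; };

	static void guest_meminfo(struct fake_sysinfo *val,
				  unsigned long rss_soft_limit,	/* pages, RLIM_INF if unset */
				  unsigned long rss_used)	/* pages currently accounted */
	{
		unsigned long total, free;

		/* the soft RSS limit caps what the guest sees as "total" */
		total = (rss_soft_limit != RLIM_INF) ? rss_soft_limit : val->totalram;
		/* total minus used equals free (never negative) */
		free = (rss_used < total) ? total - rss_used : 0;

		val->totalram = total;
		val->freeram = free;
	}

	int main(void)
	{
		struct fake_sysinfo si = { .totalram = 1 << 20, .freeram = 1 << 19 };

		guest_meminfo(&si, 65536, 10000);	/* 256 MiB limit, ~39 MiB used (4k pages) */
		printf("total=%lu free=%lu\n", si.totalram, si.freeram);
		return 0;
	}

The swap numbers in vx_vsi_swapinfo() are derived the same way, from the gap between the hard and soft RSS limits.
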
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/limit_init.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/limit_init.h
--- linux-2.6.32.17/kernel/vserver/limit_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/limit_init.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,31 @@
+
+
+static inline void vx_info_init_limit(struct _vx_limit *limit)
+{
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		__rlim_soft(limit, lim) = RLIM_INFINITY;
+		__rlim_hard(limit, lim) = RLIM_INFINITY;
+		__rlim_set(limit, lim, 0);
+		atomic_set(&__rlim_lhit(limit, lim), 0);
+		__rlim_rmin(limit, lim) = 0;
+		__rlim_rmax(limit, lim) = 0;
+	}
+}
+
+static inline void vx_info_exit_limit(struct _vx_limit *limit)
+{
+	rlim_t value;
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		if ((1 << lim) & VLIM_NOCHECK)
+			continue;
+		value = __rlim_get(limit, lim);
+		vxwprintk_xid(value,
+			"!!! limit: %p[%s,%d] = %ld on exit.",
+			limit, vlimit_name[lim], lim, (long)value);
+	}
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/limit_proc.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/limit_proc.h
--- linux-2.6.32.17/kernel/vserver/limit_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/limit_proc.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,57 @@
+#ifndef _VX_LIMIT_PROC_H
+#define _VX_LIMIT_PROC_H
+
+#include <linux/vserver/limit_int.h>
+
+
+#define VX_LIMIT_FMT	":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n"
+#define VX_LIMIT_TOP	\
+	"Limit\t current\t     min/max\t\t    soft/hard\t\thits\n"
+
+#define VX_LIMIT_ARG(r)				\
+	(unsigned long)__rlim_get(limit, r),	\
+	(unsigned long)__rlim_rmin(limit, r),	\
+	(unsigned long)__rlim_rmax(limit, r),	\
+	VX_VLIM(__rlim_soft(limit, r)),		\
+	VX_VLIM(__rlim_hard(limit, r)),		\
+	atomic_read(&__rlim_lhit(limit, r))
+
+static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer)
+{
+	vx_limit_fixup(limit, -1);
+	return sprintf(buffer, VX_LIMIT_TOP
+		"PROC"	VX_LIMIT_FMT
+		"VM"	VX_LIMIT_FMT
+		"VML"	VX_LIMIT_FMT
+		"RSS"	VX_LIMIT_FMT
+		"ANON"	VX_LIMIT_FMT
+		"RMAP"	VX_LIMIT_FMT
+		"FILES" VX_LIMIT_FMT
+		"OFD"	VX_LIMIT_FMT
+		"LOCKS" VX_LIMIT_FMT
+		"SOCK"	VX_LIMIT_FMT
+		"MSGQ"	VX_LIMIT_FMT
+		"SHM"	VX_LIMIT_FMT
+		"SEMA"	VX_LIMIT_FMT
+		"SEMS"	VX_LIMIT_FMT
+		"DENT"	VX_LIMIT_FMT,
+		VX_LIMIT_ARG(RLIMIT_NPROC),
+		VX_LIMIT_ARG(RLIMIT_AS),
+		VX_LIMIT_ARG(RLIMIT_MEMLOCK),
+		VX_LIMIT_ARG(RLIMIT_RSS),
+		VX_LIMIT_ARG(VLIMIT_ANON),
+		VX_LIMIT_ARG(VLIMIT_MAPPED),
+		VX_LIMIT_ARG(RLIMIT_NOFILE),
+		VX_LIMIT_ARG(VLIMIT_OPENFD),
+		VX_LIMIT_ARG(RLIMIT_LOCKS),
+		VX_LIMIT_ARG(VLIMIT_NSOCK),
+		VX_LIMIT_ARG(RLIMIT_MSGQUEUE),
+		VX_LIMIT_ARG(VLIMIT_SHMEM),
+		VX_LIMIT_ARG(VLIMIT_SEMARY),
+		VX_LIMIT_ARG(VLIMIT_NSEMS),
+		VX_LIMIT_ARG(VLIMIT_DENTRY));
+}
+
+#endif	/* _VX_LIMIT_PROC_H */
+
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/Makefile linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/Makefile
--- linux-2.6.32.17/kernel/vserver/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/Makefile	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux vserver routines.
+#
+
+
+obj-y		+= vserver.o
+
+vserver-y	:= switch.o context.o space.o sched.o network.o inode.o \
+		   limit.o cvirt.o cacct.o signal.o helper.o init.o \
+		   dlimit.o tag.o
+
+vserver-$(CONFIG_INET) += inet.o
+vserver-$(CONFIG_PROC_FS) += proc.o
+vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o debug.o
+vserver-$(CONFIG_VSERVER_HISTORY) += history.o
+vserver-$(CONFIG_VSERVER_MONITOR) += monitor.o
+vserver-$(CONFIG_VSERVER_DEVICE) += device.o
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/monitor.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/monitor.c
--- linux-2.6.32.17/kernel/vserver/monitor.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/monitor.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,138 @@
+/*
+ *  kernel/vserver/monitor.c
+ *
+ *  Virtual Context Scheduler Monitor
+ *
+ *  Copyright (C) 2006-2007 Herbert Pötzl
+ *
+ *  V0.01  basic design
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+
+#include <linux/vserver/monitor.h>
+#include <linux/vserver/debug_cmd.h>
+
+
+#ifdef	CONFIG_VSERVER_MONITOR
+#define VXM_SIZE	CONFIG_VSERVER_MONITOR_SIZE
+#else
+#define VXM_SIZE	64
+#endif
+
+struct _vx_monitor {
+	unsigned int counter;
+
+	struct _vx_mon_entry entry[VXM_SIZE+1];
+};
+
+
+DEFINE_PER_CPU(struct _vx_monitor, vx_monitor_buffer);
+
+unsigned volatile int vxm_active = 1;
+
+static atomic_t sequence = ATOMIC_INIT(0);
+
+
+/*	vxm_advance()
+
+	* requires disabled preemption				*/
+
+struct _vx_mon_entry *vxm_advance(int cpu)
+{
+	struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu);
+	struct _vx_mon_entry *entry;
+	unsigned int index;
+
+	index = vxm_active ? (mon->counter++ % VXM_SIZE) : VXM_SIZE;
+	entry = &mon->entry[index];
+
+	entry->ev.seq = atomic_inc_return(&sequence);
+	entry->ev.jif = jiffies;
+	return entry;
+}
+
+EXPORT_SYMBOL_GPL(vxm_advance);
+
+
+int do_read_monitor(struct __user _vx_mon_entry *data,
+	int cpu, uint32_t *index, uint32_t *count)
+{
+	int pos, ret = 0;
+	struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu);
+	int end = mon->counter;
+	int start = end - VXM_SIZE + 2;
+	int idx = *index;
+
+	/* special case: get current pos */
+	if (!*count) {
+		*index = end;
+		return 0;
+	}
+
+	/* have we lost some data? */
+	if (idx < start)
+		idx = start;
+
+	for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
+		struct _vx_mon_entry *entry =
+			&mon->entry[idx % VXM_SIZE];
+
+		/* send entry to userspace */
+		ret = copy_to_user(&data[pos], entry, sizeof(*entry));
+		if (ret)
+			break;
+	}
+	/* save new index and count */
+	*index = idx;
+	*count = pos;
+	return ret ? ret : (*index < end);
+}
+
+int vc_read_monitor(uint32_t id, void __user *data)
+{
+	struct vcmd_read_monitor_v0 vc_data;
+	int ret;
+
+	if (id >= NR_CPUS)
+		return -EINVAL;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_read_monitor((struct __user _vx_mon_entry *)vc_data.data,
+		id, &vc_data.index, &vc_data.count);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_read_monitor_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_read_monitor_v0_x32 vc_data;
+	int ret;
+
+	if (id >= NR_CPUS)
+		return -EINVAL;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_read_monitor((struct __user _vx_mon_entry *)
+		compat_ptr(vc_data.data_ptr),
+		id, &vc_data.index, &vc_data.count);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+#endif	/* CONFIG_COMPAT */
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/network.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/network.c
--- linux-2.6.32.17/kernel/vserver/network.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/network.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,864 @@
+/*
+ *  linux/kernel/vserver/network.c
+ *
+ *  Virtual Server: Network Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  broken out from vcontext V0.05
+ *  V0.02  cleaned up implementation
+ *  V0.03  added equiv nx commands
+ *  V0.04  switch to RCU based hash
+ *  V0.05  and back to locking again
+ *  V0.06  changed vcmds to nxi arg
+ *  V0.07  have __create claim() the nxi
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include <linux/vs_network.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/network_cmd.h>
+
+
+atomic_t nx_global_ctotal	= ATOMIC_INIT(0);
+atomic_t nx_global_cactive	= ATOMIC_INIT(0);
+
+static struct kmem_cache *nx_addr_v4_cachep = NULL;
+static struct kmem_cache *nx_addr_v6_cachep = NULL;
+
+
+static int __init init_network(void)
+{
+	nx_addr_v4_cachep = kmem_cache_create("nx_v4_addr_cache",
+		sizeof(struct nx_addr_v4), 0,
+		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	nx_addr_v6_cachep = kmem_cache_create("nx_v6_addr_cache",
+		sizeof(struct nx_addr_v6), 0,
+		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	return 0;
+}
+
+
+/*	__alloc_nx_addr_v4()					*/
+
+static inline struct nx_addr_v4 *__alloc_nx_addr_v4(void)
+{
+	struct nx_addr_v4 *nxa = kmem_cache_alloc(
+		nx_addr_v4_cachep, GFP_KERNEL);
+
+	if (!IS_ERR(nxa))
+		memset(nxa, 0, sizeof(*nxa));
+	return nxa;
+}
+
+/*	__dealloc_nx_addr_v4()					*/
+
+static inline void __dealloc_nx_addr_v4(struct nx_addr_v4 *nxa)
+{
+	kmem_cache_free(nx_addr_v4_cachep, nxa);
+}
+
+/*	__dealloc_nx_addr_v4_all()				*/
+
+static inline void __dealloc_nx_addr_v4_all(struct nx_addr_v4 *nxa)
+{
+	while (nxa) {
+		struct nx_addr_v4 *next = nxa->next;
+
+		__dealloc_nx_addr_v4(nxa);
+		nxa = next;
+	}
+}
+
+
+#ifdef CONFIG_IPV6
+
+/*	__alloc_nx_addr_v6()					*/
+
+static inline struct nx_addr_v6 *__alloc_nx_addr_v6(void)
+{
+	struct nx_addr_v6 *nxa = kmem_cache_alloc(
+		nx_addr_v6_cachep, GFP_KERNEL);
+
+	if (!IS_ERR(nxa))
+		memset(nxa, 0, sizeof(*nxa));
+	return nxa;
+}
+
+/*	__dealloc_nx_addr_v6()					*/
+
+static inline void __dealloc_nx_addr_v6(struct nx_addr_v6 *nxa)
+{
+	kmem_cache_free(nx_addr_v6_cachep, nxa);
+}
+
+/*	__dealloc_nx_addr_v6_all()				*/
+
+static inline void __dealloc_nx_addr_v6_all(struct nx_addr_v6 *nxa)
+{
+	while (nxa) {
+		struct nx_addr_v6 *next = nxa->next;
+
+		__dealloc_nx_addr_v6(nxa);
+		nxa = next;
+	}
+}
+
+#endif	/* CONFIG_IPV6 */
+
+/*	__alloc_nx_info()
+
+	* allocate an initialized nx_info struct
+	* doesn't make it visible (hash)			*/
+
+static struct nx_info *__alloc_nx_info(nid_t nid)
+{
+	struct nx_info *new = NULL;
+
+	vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
+
+	/* would this benefit from a slab cache? */
+	new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
+	if (!new)
+		return 0;
+
+	memset(new, 0, sizeof(struct nx_info));
+	new->nx_id = nid;
+	INIT_HLIST_NODE(&new->nx_hlist);
+	atomic_set(&new->nx_usecnt, 0);
+	atomic_set(&new->nx_tasks, 0);
+	new->nx_state = 0;
+
+	new->nx_flags = NXF_INIT_SET;
+
+	/* rest of init goes here */
+
+	new->v4_lback.s_addr = htonl(INADDR_LOOPBACK);
+	new->v4_bcast.s_addr = htonl(INADDR_BROADCAST);
+
+	vxdprintk(VXD_CBIT(nid, 0),
+		"alloc_nx_info(%d) = %p", nid, new);
+	atomic_inc(&nx_global_ctotal);
+	return new;
+}
+
+/*	__dealloc_nx_info()
+
+	* final disposal of nx_info				*/
+
+static void __dealloc_nx_info(struct nx_info *nxi)
+{
+	vxdprintk(VXD_CBIT(nid, 0),
+		"dealloc_nx_info(%p)", nxi);
+
+	nxi->nx_hlist.next = LIST_POISON1;
+	nxi->nx_id = -1;
+
+	BUG_ON(atomic_read(&nxi->nx_usecnt));
+	BUG_ON(atomic_read(&nxi->nx_tasks));
+
+	__dealloc_nx_addr_v4_all(nxi->v4.next);
+
+	nxi->nx_state |= NXS_RELEASED;
+	kfree(nxi);
+	atomic_dec(&nx_global_ctotal);
+}
+
+static void __shutdown_nx_info(struct nx_info *nxi)
+{
+	nxi->nx_state |= NXS_SHUTDOWN;
+	vs_net_change(nxi, VSC_NETDOWN);
+}
+
+/*	exported stuff						*/
+
+void free_nx_info(struct nx_info *nxi)
+{
+	/* context shutdown is mandatory */
+	BUG_ON(nxi->nx_state != NXS_SHUTDOWN);
+
+	/* context must not be hashed */
+	BUG_ON(nxi->nx_state & NXS_HASHED);
+
+	BUG_ON(atomic_read(&nxi->nx_usecnt));
+	BUG_ON(atomic_read(&nxi->nx_tasks));
+
+	__dealloc_nx_info(nxi);
+}
+
+
+void __nx_set_lback(struct nx_info *nxi)
+{
+	int nid = nxi->nx_id;
+	__be32 lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8));
+
+	nxi->v4_lback.s_addr = lback;
+}
+
+extern int __nx_inet_add_lback(__be32 addr);
+extern int __nx_inet_del_lback(__be32 addr);
+
+
+/*	hash table for nx_info hash */
+
+#define NX_HASH_SIZE	13
+
+struct hlist_head nx_info_hash[NX_HASH_SIZE];
+
+static spinlock_t nx_info_hash_lock = SPIN_LOCK_UNLOCKED;
+
+
+static inline unsigned int __hashval(nid_t nid)
+{
+	return (nid % NX_HASH_SIZE);
+}
+
+
+
+/*	__hash_nx_info()
+
+	* add the nxi to the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __hash_nx_info(struct nx_info *nxi)
+{
+	struct hlist_head *head;
+
+	vxd_assert_lock(&nx_info_hash_lock);
+	vxdprintk(VXD_CBIT(nid, 4),
+		"__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+
+	/* context must not be hashed */
+	BUG_ON(nx_info_state(nxi, NXS_HASHED));
+
+	nxi->nx_state |= NXS_HASHED;
+	head = &nx_info_hash[__hashval(nxi->nx_id)];
+	hlist_add_head(&nxi->nx_hlist, head);
+	atomic_inc(&nx_global_cactive);
+}
+
+/*	__unhash_nx_info()
+
+	* remove the nxi from the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __unhash_nx_info(struct nx_info *nxi)
+{
+	vxd_assert_lock(&nx_info_hash_lock);
+	vxdprintk(VXD_CBIT(nid, 4),
+		"__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
+		atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));
+
+	/* context must be hashed */
+	BUG_ON(!nx_info_state(nxi, NXS_HASHED));
+	/* but without tasks */
+	BUG_ON(atomic_read(&nxi->nx_tasks));
+
+	nxi->nx_state &= ~NXS_HASHED;
+	hlist_del(&nxi->nx_hlist);
+	atomic_dec(&nx_global_cactive);
+}
+
+
+/*	__lookup_nx_info()
+
+	* requires the hash_lock to be held
+	* doesn't increment the nx_refcnt			*/
+
+static inline struct nx_info *__lookup_nx_info(nid_t nid)
+{
+	struct hlist_head *head = &nx_info_hash[__hashval(nid)];
+	struct hlist_node *pos;
+	struct nx_info *nxi;
+
+	vxd_assert_lock(&nx_info_hash_lock);
+	hlist_for_each(pos, head) {
+		nxi = hlist_entry(pos, struct nx_info, nx_hlist);
+
+		if (nxi->nx_id == nid)
+			goto found;
+	}
+	nxi = NULL;
+found:
+	vxdprintk(VXD_CBIT(nid, 0),
+		"__lookup_nx_info(#%u): %p[#%u]",
+		nid, nxi, nxi ? nxi->nx_id : 0);
+	return nxi;
+}
+
+
+/*	__create_nx_info()
+
+	* create the requested context
+	* get(), claim() and hash it				*/
+
+static struct nx_info *__create_nx_info(int id)
+{
+	struct nx_info *new, *nxi = NULL;
+
+	vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);
+
+	if (!(new = __alloc_nx_info(id)))
+		return ERR_PTR(-ENOMEM);
+
+	/* required to make dynamic nids unique */
+	spin_lock(&nx_info_hash_lock);
+
+	/* static context requested */
+	if ((nxi = __lookup_nx_info(id))) {
+		vxdprintk(VXD_CBIT(nid, 0),
+			"create_nx_info(%d) = %p (already there)", id, nxi);
+		if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+			nxi = ERR_PTR(-EBUSY);
+		else
+			nxi = ERR_PTR(-EEXIST);
+		goto out_unlock;
+	}
+	/* new context */
+	vxdprintk(VXD_CBIT(nid, 0),
+		"create_nx_info(%d) = %p (new)", id, new);
+	claim_nx_info(new, NULL);
+	__nx_set_lback(new);
+	__hash_nx_info(get_nx_info(new));
+	nxi = new, new = NULL;
+
+out_unlock:
+	spin_unlock(&nx_info_hash_lock);
+	if (new)
+		__dealloc_nx_info(new);
+	return nxi;
+}
+
+
+
+/*	exported stuff						*/
+
+
+void unhash_nx_info(struct nx_info *nxi)
+{
+	__shutdown_nx_info(nxi);
+	spin_lock(&nx_info_hash_lock);
+	__unhash_nx_info(nxi);
+	spin_unlock(&nx_info_hash_lock);
+}
+
+/*	lookup_nx_info()
+
+	* search for a nx_info and get() it
+	* negative id means current				*/
+
+struct nx_info *lookup_nx_info(int id)
+{
+	struct nx_info *nxi = NULL;
+
+	if (id < 0) {
+		nxi = get_nx_info(current_nx_info());
+	} else if (id > 1) {
+		spin_lock(&nx_info_hash_lock);
+		nxi = get_nx_info(__lookup_nx_info(id));
+		spin_unlock(&nx_info_hash_lock);
+	}
+	return nxi;
+}
+
+/*	nid_is_hashed()
+
+	* verify that nid is still hashed			*/
+
+int nid_is_hashed(nid_t nid)
+{
+	int hashed;
+
+	spin_lock(&nx_info_hash_lock);
+	hashed = (__lookup_nx_info(nid) != NULL);
+	spin_unlock(&nx_info_hash_lock);
+	return hashed;
+}
+
+
+#ifdef	CONFIG_PROC_FS
+
+/*	get_nid_list()
+
+	* get a subset of hashed nids for proc
+	* assumes size is at least one				*/
+
+int get_nid_list(int index, unsigned int *nids, int size)
+{
+	int hindex, nr_nids = 0;
+
+	/* only show current and children */
+	if (!nx_check(0, VS_ADMIN | VS_WATCH)) {
+		if (index > 0)
+			return 0;
+		nids[nr_nids] = nx_current_nid();
+		return 1;
+	}
+
+	for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
+		struct hlist_head *head = &nx_info_hash[hindex];
+		struct hlist_node *pos;
+
+		spin_lock(&nx_info_hash_lock);
+		hlist_for_each(pos, head) {
+			struct nx_info *nxi;
+
+			if (--index > 0)
+				continue;
+
+			nxi = hlist_entry(pos, struct nx_info, nx_hlist);
+			nids[nr_nids] = nxi->nx_id;
+			if (++nr_nids >= size) {
+				spin_unlock(&nx_info_hash_lock);
+				goto out;
+			}
+		}
+		/* keep the lock time short */
+		spin_unlock(&nx_info_hash_lock);
+	}
+out:
+	return nr_nids;
+}
+#endif
+
+
+/*
+ *	migrate task to new network
+ *	gets nxi, puts old_nxi on change
+ */
+
+int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
+{
+	struct nx_info *old_nxi;
+	int ret = 0;
+
+	if (!p || !nxi)
+		BUG();
+
+	vxdprintk(VXD_CBIT(nid, 5),
+		"nx_migrate_task(%p,%p[#%d.%d.%d])",
+		p, nxi, nxi->nx_id,
+		atomic_read(&nxi->nx_usecnt),
+		atomic_read(&nxi->nx_tasks));
+
+	if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
+		!nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+		return -EACCES;
+
+	if (nx_info_state(nxi, NXS_SHUTDOWN))
+		return -EFAULT;
+
+	/* maybe disallow this completely? */
+	old_nxi = task_get_nx_info(p);
+	if (old_nxi == nxi)
+		goto out;
+
+	task_lock(p);
+	if (old_nxi)
+		clr_nx_info(&p->nx_info);
+	claim_nx_info(nxi, p);
+	set_nx_info(&p->nx_info, nxi);
+	p->nid = nxi->nx_id;
+	task_unlock(p);
+
+	vxdprintk(VXD_CBIT(nid, 5),
+		"moved task %p into nxi:%p[#%d]",
+		p, nxi, nxi->nx_id);
+
+	if (old_nxi)
+		release_nx_info(old_nxi, p);
+	ret = 0;
+out:
+	put_nx_info(old_nxi);
+	return ret;
+}
+
+
+void nx_set_persistent(struct nx_info *nxi)
+{
+	vxdprintk(VXD_CBIT(nid, 6),
+		"nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+	get_nx_info(nxi);
+	claim_nx_info(nxi, NULL);
+}
+
+void nx_clear_persistent(struct nx_info *nxi)
+{
+	vxdprintk(VXD_CBIT(nid, 6),
+		"nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+	release_nx_info(nxi, NULL);
+	put_nx_info(nxi);
+}
+
+void nx_update_persistent(struct nx_info *nxi)
+{
+	if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
+		nx_set_persistent(nxi);
+	else
+		nx_clear_persistent(nxi);
+}
+
+/* vserver syscall commands below here */
+
+/* task nid and nx_info functions */
+
+#include <asm/uaccess.h>
+
+
+int vc_task_nid(uint32_t id)
+{
+	nid_t nid;
+
+	if (id) {
+		struct task_struct *tsk;
+
+		read_lock(&tasklist_lock);
+		tsk = find_task_by_real_pid(id);
+		nid = (tsk) ? tsk->nid : -ESRCH;
+		read_unlock(&tasklist_lock);
+	} else
+		nid = nx_current_nid();
+	return nid;
+}
+
+
+int vc_nx_info(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_nx_info_v0 vc_data;
+
+	vc_data.nid = nxi->nx_id;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+/* network functions */
+
+int vc_net_create(uint32_t nid, void __user *data)
+{
+	struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
+	struct nx_info *new_nxi;
+	int ret;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if ((nid > MAX_S_CONTEXT) || (nid < 2))
+		return -EINVAL;
+
+	new_nxi = __create_nx_info(nid);
+	if (IS_ERR(new_nxi))
+		return PTR_ERR(new_nxi);
+
+	/* initial flags */
+	new_nxi->nx_flags = vc_data.flagword;
+
+	ret = -ENOEXEC;
+	if (vs_net_change(new_nxi, VSC_NETUP))
+		goto out;
+
+	ret = nx_migrate_task(current, new_nxi);
+	if (ret)
+		goto out;
+
+	/* return context id on success */
+	ret = new_nxi->nx_id;
+
+	/* get a reference for persistent contexts */
+	if ((vc_data.flagword & NXF_PERSISTENT))
+		nx_set_persistent(new_nxi);
+out:
+	release_nx_info(new_nxi, NULL);
+	put_nx_info(new_nxi);
+	return ret;
+}
+
+
+int vc_net_migrate(struct nx_info *nxi, void __user *data)
+{
+	return nx_migrate_task(current, nxi);
+}
+
+
+
+int do_add_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask,
+	uint16_t type, uint16_t flags)
+{
+	struct nx_addr_v4 *nxa = &nxi->v4;
+
+	if (NX_IPV4(nxi)) {
+		/* locate last entry */
+		for (; nxa->next; nxa = nxa->next);
+		nxa->next = __alloc_nx_addr_v4();
+		nxa = nxa->next;
+
+		if (IS_ERR(nxa))
+			return PTR_ERR(nxa);
+	}
+
+	if (nxi->v4.next)
+		/* clear the single ip flag once we have an ip list */
+		nxi->nx_flags &= ~NXF_SINGLE_IP;
+
+	nxa->ip[0].s_addr = ip;
+	nxa->ip[1].s_addr = ip2;
+	nxa->mask.s_addr = mask;
+	nxa->type = type;
+	nxa->flags = flags;
+	return 0;
+}
+
+
+int vc_net_add(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_v0 vc_data;
+	int index, ret = 0;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_IPV4:
+		if ((vc_data.count < 1) || (vc_data.count > 4))
+			return -EINVAL;
+
+		index = 0;
+		while (index < vc_data.count) {
+			ret = do_add_v4_addr(nxi, vc_data.ip[index].s_addr, 0,
+				vc_data.mask[index].s_addr, NXA_TYPE_ADDR, 0);
+			if (ret)
+				return ret;
+			index++;
+		}
+		ret = index;
+		break;
+
+	case NXA_TYPE_IPV4|NXA_MOD_BCAST:
+		nxi->v4_bcast = vc_data.ip[0];
+		ret = 1;
+		break;
+
+	case NXA_TYPE_IPV4|NXA_MOD_LBACK:
+		nxi->v4_lback = vc_data.ip[0];
+		ret = 1;
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+int vc_net_remove(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_v0 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ANY:
+		__dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL));
+		memset(&nxi->v4, 0, sizeof(nxi->v4));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+int vc_net_add_ipv4(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv4_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ADDR:
+	case NXA_TYPE_RANGE:
+	case NXA_TYPE_MASK:
+		return do_add_v4_addr(nxi, vc_data.ip.s_addr, 0,
+			vc_data.mask.s_addr, vc_data.type, vc_data.flags);
+
+	case NXA_TYPE_ADDR | NXA_MOD_BCAST:
+		nxi->v4_bcast = vc_data.ip;
+		break;
+
+	case NXA_TYPE_ADDR | NXA_MOD_LBACK:
+		nxi->v4_lback = vc_data.ip;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int vc_net_remove_ipv4(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv4_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+/*	case NXA_TYPE_ADDR:
+		break;		*/
+
+	case NXA_TYPE_ANY:
+		__dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL));
+		memset(&nxi->v4, 0, sizeof(nxi->v4));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+#ifdef CONFIG_IPV6
+
+int do_add_v6_addr(struct nx_info *nxi,
+	struct in6_addr *ip, struct in6_addr *mask,
+	uint32_t prefix, uint16_t type, uint16_t flags)
+{
+	struct nx_addr_v6 *nxa = &nxi->v6;
+
+	if (NX_IPV6(nxi)) {
+		/* locate last entry */
+		for (; nxa->next; nxa = nxa->next);
+		nxa->next = __alloc_nx_addr_v6();
+		nxa = nxa->next;
+
+		if (IS_ERR(nxa))
+			return PTR_ERR(nxa);
+	}
+
+	nxa->ip = *ip;
+	nxa->mask = *mask;
+	nxa->prefix = prefix;
+	nxa->type = type;
+	nxa->flags = flags;
+	return 0;
+}
+
+
+int vc_net_add_ipv6(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv6_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ADDR:
+	case NXA_TYPE_MASK:
+		return do_add_v6_addr(nxi, &vc_data.ip, &vc_data.mask,
+			vc_data.prefix, vc_data.type, vc_data.flags);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int vc_net_remove_ipv6(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv6_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ANY:
+		__dealloc_nx_addr_v6_all(xchg(&nxi->v6.next, NULL));
+		memset(&nxi->v6, 0, sizeof(nxi->v6));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#endif	/* CONFIG_IPV6 */
+
+
+int vc_get_nflags(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_flags_v0 vc_data;
+
+	vc_data.flagword = nxi->nx_flags;
+
+	/* special STATE flag handling */
+	vc_data.mask = vs_mask_flags(~0ULL, nxi->nx_flags, NXF_ONE_TIME);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_nflags(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_flags_v0 vc_data;
+	uint64_t mask, trigger;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	/* special STATE flag handling */
+	mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
+	trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);
+
+	nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
+		vc_data.flagword, mask);
+	if (trigger & NXF_PERSISTENT)
+		nx_update_persistent(nxi);
+
+	return 0;
+}
+
+int vc_get_ncaps(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_caps_v0 vc_data;
+
+	vc_data.ncaps = nxi->nx_ncaps;
+	vc_data.cmask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_ncaps(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_caps_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
+		vc_data.ncaps, vc_data.cmask);
+	return 0;
+}
+
+
+#include <linux/module.h>
+
+module_init(init_network);
+
+EXPORT_SYMBOL_GPL(free_nx_info);
+EXPORT_SYMBOL_GPL(unhash_nx_info);
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/proc.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/proc.c
--- linux-2.6.32.17/kernel/vserver/proc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/proc.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,1098 @@
+/*
+ *  linux/kernel/vserver/proc.c
+ *
+ *  Virtual Context Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *  V0.02  adaptation vs1.3.0
+ *  V0.03  proc permissions
+ *  V0.04  locking/generic
+ *  V0.05  next generation procfs
+ *  V0.06  inode validation
+ *  V0.07  generic rewrite vid
+ *  V0.08  remove inode type
+ *
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/fs_struct.h>
+#include <linux/mount.h>
+#include <asm/unistd.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_cvirt.h>
+
+#include <linux/in.h>
+#include <linux/inetdevice.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
+
+#include <linux/vserver/global.h>
+
+#include "cvirt_proc.h"
+#include "cacct_proc.h"
+#include "limit_proc.h"
+#include "sched_proc.h"
+#include "vci_config.h"
+
+
+static inline char *print_cap_t(char *buffer, kernel_cap_t *c)
+{
+	unsigned __capi;
+
+	CAP_FOR_EACH_U32(__capi) {
+		buffer += sprintf(buffer, "%08x",
+			c->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
+	}
+	return buffer;
+}
+
+
+static struct proc_dir_entry *proc_virtual;
+
+static struct proc_dir_entry *proc_virtnet;
+
+
+/* first the actual feeds */
+
+
+static int proc_vci(char *buffer)
+{
+	return sprintf(buffer,
+		"VCIVersion:\t%04x:%04x\n"
+		"VCISyscall:\t%d\n"
+		"VCIKernel:\t%08x\n",
+		VCI_VERSION >> 16,
+		VCI_VERSION & 0xFFFF,
+		__NR_vserver,
+		vci_kernel_config());
+}
+
+static int proc_virtual_info(char *buffer)
+{
+	return proc_vci(buffer);
+}
+
+static int proc_virtual_status(char *buffer)
+{
+	return sprintf(buffer,
+		"#CTotal:\t%d\n"
+		"#CActive:\t%d\n"
+		"#NSProxy:\t%d\t%d %d %d %d %d %d\n"
+		"#InitTask:\t%d\t%d %d\n",
+		atomic_read(&vx_global_ctotal),
+		atomic_read(&vx_global_cactive),
+		atomic_read(&vs_global_nsproxy),
+		atomic_read(&vs_global_fs),
+		atomic_read(&vs_global_mnt_ns),
+		atomic_read(&vs_global_uts_ns),
+		atomic_read(&nr_ipc_ns),
+		atomic_read(&vs_global_user_ns),
+		atomic_read(&vs_global_pid_ns),
+		atomic_read(&init_task.usage),
+		atomic_read(&init_task.nsproxy->count),
+		init_task.fs->users);
+}
+
+
+int proc_vxi_info(struct vx_info *vxi, char *buffer)
+{
+	int length;
+
+	length = sprintf(buffer,
+		"ID:\t%d\n"
+		"Info:\t%p\n"
+		"Init:\t%d\n"
+		"OOM:\t%lld\n",
+		vxi->vx_id,
+		vxi,
+		vxi->vx_initpid,
+		vxi->vx_badness_bias);
+	return length;
+}
+
+int proc_vxi_status(struct vx_info *vxi, char *buffer)
+{
+	char *orig = buffer;
+
+	buffer += sprintf(buffer,
+		"UseCnt:\t%d\n"
+		"Tasks:\t%d\n"
+		"Flags:\t%016llx\n",
+		atomic_read(&vxi->vx_usecnt),
+		atomic_read(&vxi->vx_tasks),
+		(unsigned long long)vxi->vx_flags);
+
+	buffer += sprintf(buffer, "BCaps:\t");
+	buffer = print_cap_t(buffer, &vxi->vx_bcaps);
+	buffer += sprintf(buffer, "\n");
+
+	buffer += sprintf(buffer,
+		"CCaps:\t%016llx\n"
+		"Spaces:\t%08lx %08lx\n",
+		(unsigned long long)vxi->vx_ccaps,
+		vxi->vx_nsmask[0], vxi->vx_nsmask[1]);
+	return buffer - orig;
+}
+
+int proc_vxi_limit(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_limit(&vxi->limit, buffer);
+}
+
+int proc_vxi_sched(struct vx_info *vxi, char *buffer)
+{
+	int cpu, length;
+
+	length = vx_info_proc_sched(&vxi->sched, buffer);
+	for_each_online_cpu(cpu) {
+		length += vx_info_proc_sched_pc(
+			&vx_per_cpu(vxi, sched_pc, cpu),
+			buffer + length, cpu);
+	}
+	return length;
+}
+
+int proc_vxi_nsproxy0(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_nsproxy(vxi->vx_nsproxy[0], buffer);
+}
+
+int proc_vxi_nsproxy1(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_nsproxy(vxi->vx_nsproxy[1], buffer);
+}
+
+int proc_vxi_cvirt(struct vx_info *vxi, char *buffer)
+{
+	int cpu, length;
+
+	vx_update_load(vxi);
+	length = vx_info_proc_cvirt(&vxi->cvirt, buffer);
+	for_each_online_cpu(cpu) {
+		length += vx_info_proc_cvirt_pc(
+			&vx_per_cpu(vxi, cvirt_pc, cpu),
+			buffer + length, cpu);
+	}
+	return length;
+}
+
+int proc_vxi_cacct(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_cacct(&vxi->cacct, buffer);
+}
+
+
+static int proc_virtnet_info(char *buffer)
+{
+	return proc_vci(buffer);
+}
+
+static int proc_virtnet_status(char *buffer)
+{
+	return sprintf(buffer,
+		"#CTotal:\t%d\n"
+		"#CActive:\t%d\n",
+		atomic_read(&nx_global_ctotal),
+		atomic_read(&nx_global_cactive));
+}
+
+int proc_nxi_info(struct nx_info *nxi, char *buffer)
+{
+	struct nx_addr_v4 *v4a;
+#ifdef	CONFIG_IPV6
+	struct nx_addr_v6 *v6a;
+#endif
+	int length, i;
+
+	length = sprintf(buffer,
+		"ID:\t%d\n"
+		"Info:\t%p\n"
+		"Bcast:\t" NIPQUAD_FMT "\n"
+		"Lback:\t" NIPQUAD_FMT "\n",
+		nxi->nx_id,
+		nxi,
+		NIPQUAD(nxi->v4_bcast.s_addr),
+		NIPQUAD(nxi->v4_lback.s_addr));
+
+	if (!NX_IPV4(nxi))
+		goto skip_v4;
+	for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
+		length += sprintf(buffer + length, "%d:\t" NXAV4_FMT "\n",
+			i, NXAV4(v4a));
+skip_v4:
+#ifdef	CONFIG_IPV6
+	if (!NX_IPV6(nxi))
+		goto skip_v6;
+	for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
+		length += sprintf(buffer + length, "%d:\t" NXAV6_FMT "\n",
+			i, NXAV6(v6a));
+skip_v6:
+#endif
+	return length;
+}
+
+int proc_nxi_status(struct nx_info *nxi, char *buffer)
+{
+	int length;
+
+	length = sprintf(buffer,
+		"UseCnt:\t%d\n"
+		"Tasks:\t%d\n"
+		"Flags:\t%016llx\n"
+		"NCaps:\t%016llx\n",
+		atomic_read(&nxi->nx_usecnt),
+		atomic_read(&nxi->nx_tasks),
+		(unsigned long long)nxi->nx_flags,
+		(unsigned long long)nxi->nx_ncaps);
+	return length;
+}
+
+
+
+/* here the inode helpers */
+
+struct vs_entry {
+	int len;
+	char *name;
+	mode_t mode;
+	struct inode_operations *iop;
+	struct file_operations *fop;
+	union proc_op op;
+};
+
+static struct inode *vs_proc_make_inode(struct super_block *sb, struct vs_entry *p)
+{
+	struct inode *inode = new_inode(sb);
+
+	if (!inode)
+		goto out;
+
+	inode->i_mode = p->mode;
+	if (p->iop)
+		inode->i_op = p->iop;
+	if (p->fop)
+		inode->i_fop = p->fop;
+
+	inode->i_nlink = (p->mode & S_IFDIR) ? 2 : 1;
+	inode->i_flags |= S_IMMUTABLE;
+
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	inode->i_tag = 0;
+out:
+	return inode;
+}
+
+static struct dentry *vs_proc_instantiate(struct inode *dir,
+	struct dentry *dentry, int id, void *ptr)
+{
+	struct vs_entry *p = ptr;
+	struct inode *inode = vs_proc_make_inode(dir->i_sb, p);
+	struct dentry *error = ERR_PTR(-EINVAL);
+
+	if (!inode)
+		goto out;
+
+	PROC_I(inode)->op = p->op;
+	PROC_I(inode)->fd = id;
+	d_add(dentry, inode);
+	error = NULL;
+out:
+	return error;
+}
+
+/* Lookups */
+
+typedef struct dentry *instantiate_t(struct inode *, struct dentry *, int, void *);
+
+/*
+ * Fill a directory entry.
+ *
+ * If possible create the dcache entry and derive our inode number and
+ * file type from the dcache entry.
+ *
+ * Since all of the proc inode numbers are dynamically generated, the inode
+ * numbers do not exist until the inode is cached.  This means creating
+ * the dcache entry in readdir is necessary to keep the inode numbers
+ * reported by readdir in sync with the inode numbers reported
+ * by stat.
+ */
+static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+	char *name, int len, instantiate_t instantiate, int id, void *ptr)
+{
+	struct dentry *child, *dir = filp->f_dentry;
+	struct inode *inode;
+	struct qstr qname;
+	ino_t ino = 0;
+	unsigned type = DT_UNKNOWN;
+
+	qname.name = name;
+	qname.len  = len;
+	qname.hash = full_name_hash(name, len);
+
+	child = d_lookup(dir, &qname);
+	if (!child) {
+		struct dentry *new;
+		new = d_alloc(dir, &qname);
+		if (new) {
+			child = instantiate(dir->d_inode, new, id, ptr);
+			if (child)
+				dput(new);
+			else
+				child = new;
+		}
+	}
+	if (!child || IS_ERR(child) || !child->d_inode)
+		goto end_instantiate;
+	inode = child->d_inode;
+	if (inode) {
+		ino = inode->i_ino;
+		type = inode->i_mode >> 12;
+	}
+	dput(child);
+end_instantiate:
+	if (!ino)
+		ino = find_inode_number(dir, &qname);
+	if (!ino)
+		ino = 1;
+	return filldir(dirent, name, len, filp->f_pos, ino, type);
+}
+
+
+
+/* get and revalidate vx_info/xid */
+
+static inline
+struct vx_info *get_proc_vx_info(struct inode *inode)
+{
+	return lookup_vx_info(PROC_I(inode)->fd);
+}
+
+static int proc_xid_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	xid_t xid = PROC_I(inode)->fd;
+
+	if (!xid || xid_is_hashed(xid))
+		return 1;
+	d_drop(dentry);
+	return 0;
+}
+
+
+/* get and revalidate nx_info/nid */
+
+static int proc_nid_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	struct inode *inode = dentry->d_inode;
+	nid_t nid = PROC_I(inode)->fd;
+
+	if (!nid || nid_is_hashed(nid))
+		return 1;
+	d_drop(dentry);
+	return 0;
+}
+
+
+
+#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
+
+static ssize_t proc_vs_info_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	unsigned long page;
+	ssize_t length = 0;
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+
+	/* fade that out as soon as stable */
+	WARN_ON(PROC_I(inode)->fd);
+
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	BUG_ON(!PROC_I(inode)->op.proc_vs_read);
+	length = PROC_I(inode)->op.proc_vs_read((char *)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos,
+			(char *)page, length);
+
+	free_page(page);
+	return length;
+}
+
+static ssize_t proc_vx_info_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct vx_info *vxi = NULL;
+	xid_t xid = PROC_I(inode)->fd;
+	unsigned long page;
+	ssize_t length = 0;
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+
+	/* fade that out as soon as stable */
+	WARN_ON(!xid);
+	vxi = lookup_vx_info(xid);
+	if (!vxi)
+		goto out;
+
+	length = -ENOMEM;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		goto out_put;
+
+	BUG_ON(!PROC_I(inode)->op.proc_vxi_read);
+	length = PROC_I(inode)->op.proc_vxi_read(vxi, (char *)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos,
+			(char *)page, length);
+
+	free_page(page);
+out_put:
+	put_vx_info(vxi);
+out:
+	return length;
+}
+
+static ssize_t proc_nx_info_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct nx_info *nxi = NULL;
+	nid_t nid = PROC_I(inode)->fd;
+	unsigned long page;
+	ssize_t length = 0;
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+
+	/* fade that out as soon as stable */
+	WARN_ON(!nid);
+	nxi = lookup_nx_info(nid);
+	if (!nxi)
+		goto out;
+
+	length = -ENOMEM;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		goto out_put;
+
+	BUG_ON(!PROC_I(inode)->op.proc_nxi_read);
+	length = PROC_I(inode)->op.proc_nxi_read(nxi, (char *)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos,
+			(char *)page, length);
+
+	free_page(page);
+out_put:
+	put_nx_info(nxi);
+out:
+	return length;
+}
+
+
+
+/* here comes the lower level */
+
+
+#define NOD(NAME, MODE, IOP, FOP, OP) {	\
+	.len  = sizeof(NAME) - 1,	\
+	.name = (NAME),			\
+	.mode = MODE,			\
+	.iop  = IOP,			\
+	.fop  = FOP,			\
+	.op   = OP,			\
+}
+
+
+#define DIR(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFDIR | (MODE)),			\
+		&proc_ ## OTYPE ## _inode_operations,	\
+		&proc_ ## OTYPE ## _file_operations, { } )
+
+#define INF(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
+		&proc_vs_info_file_operations,		\
+		{ .proc_vs_read = &proc_##OTYPE } )
+
+#define VINF(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
+		&proc_vx_info_file_operations,		\
+		{ .proc_vxi_read = &proc_##OTYPE } )
+
+#define NINF(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
+		&proc_nx_info_file_operations,		\
+		{ .proc_nxi_read = &proc_##OTYPE } )
+
+
+static struct file_operations proc_vs_info_file_operations = {
+	.read =		proc_vs_info_read,
+};
+
+static struct file_operations proc_vx_info_file_operations = {
+	.read =		proc_vx_info_read,
+};
+
+static struct dentry_operations proc_xid_dentry_operations = {
+	.d_revalidate =	proc_xid_revalidate,
+};
+
+static struct vs_entry vx_base_stuff[] = {
+	VINF("info",	S_IRUGO, vxi_info),
+	VINF("status",	S_IRUGO, vxi_status),
+	VINF("limit",	S_IRUGO, vxi_limit),
+	VINF("sched",	S_IRUGO, vxi_sched),
+	VINF("nsproxy",	S_IRUGO, vxi_nsproxy0),
+	VINF("nsproxy1",S_IRUGO, vxi_nsproxy1),
+	VINF("cvirt",	S_IRUGO, vxi_cvirt),
+	VINF("cacct",	S_IRUGO, vxi_cacct),
+	{}
+};
+
+
+
+
+static struct dentry *proc_xid_instantiate(struct inode *dir,
+	struct dentry *dentry, int id, void *ptr)
+{
+	dentry->d_op = &proc_xid_dentry_operations;
+	return vs_proc_instantiate(dir, dentry, id, ptr);
+}
+
+static struct dentry *proc_xid_lookup(struct inode *dir,
+	struct dentry *dentry, struct nameidata *nd)
+{
+	struct vs_entry *p = vx_base_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (!p->name)
+		goto out;
+
+	error = proc_xid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
+out:
+	return error;
+}
+
+static int proc_xid_readdir(struct file *filp,
+	void *dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = vx_base_stuff;
+	int size = sizeof(vx_base_stuff) / sizeof(struct vs_entry);
+	int pos, index;
+	u64 ino;
+
+	pos = filp->f_pos;
+	switch (pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	default:
+		index = pos - 2;
+		if (index >= size)
+			goto out;
+		for (p += index; p->name; p++) {
+			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+				vs_proc_instantiate, PROC_I(inode)->fd, p))
+				goto out;
+			pos++;
+		}
+	}
+out:
+	filp->f_pos = pos;
+	return 1;
+}
+
+
+
+static struct file_operations proc_nx_info_file_operations = {
+	.read =		proc_nx_info_read,
+};
+
+static struct dentry_operations proc_nid_dentry_operations = {
+	.d_revalidate =	proc_nid_revalidate,
+};
+
+static struct vs_entry nx_base_stuff[] = {
+	NINF("info",	S_IRUGO, nxi_info),
+	NINF("status",	S_IRUGO, nxi_status),
+	{}
+};
+
+
+static struct dentry *proc_nid_instantiate(struct inode *dir,
+	struct dentry *dentry, int id, void *ptr)
+{
+	dentry->d_op = &proc_nid_dentry_operations;
+	return vs_proc_instantiate(dir, dentry, id, ptr);
+}
+
+static struct dentry *proc_nid_lookup(struct inode *dir,
+	struct dentry *dentry, struct nameidata *nd)
+{
+	struct vs_entry *p = nx_base_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (!p->name)
+		goto out;
+
+	error = proc_nid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
+out:
+	return error;
+}
+
+static int proc_nid_readdir(struct file *filp,
+	void *dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = nx_base_stuff;
+	int size = sizeof(nx_base_stuff) / sizeof(struct vs_entry);
+	int pos, index;
+	u64 ino;
+
+	pos = filp->f_pos;
+	switch (pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	default:
+		index = pos - 2;
+		if (index >= size)
+			goto out;
+		for (p += index; p->name; p++) {
+			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+				vs_proc_instantiate, PROC_I(inode)->fd, p))
+				goto out;
+			pos++;
+		}
+	}
+out:
+	filp->f_pos = pos;
+	return 1;
+}
+
+
+#define MAX_MULBY10	((~0U - 9) / 10)
+
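+/* parse a decimal xid/nid from a dentry name; returns -1 for
+   malformed names, zero or leading-zero values, and overflow */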
+static inline int atovid(const char *str, int len)
+{
+	int vid, c;
+
+	vid = 0;
+	while (len-- > 0) {
+		c = *str - '0';
+		str++;
+		if (c < 0 || c > 9)
+			return -1;
+		if (vid >= MAX_MULBY10)
+			return -1;
+		vid *= 10;
+		vid += c;
+		if (!vid)
+			return -1;
+	}
+	return vid;
+}
+
+/* now the upper level (virtual) */
+
+
+static struct file_operations proc_xid_file_operations = {
+	.read =		generic_read_dir,
+	.readdir =	proc_xid_readdir,
+};
+
+static struct inode_operations proc_xid_inode_operations = {
+	.lookup =	proc_xid_lookup,
+};
+
+static struct vs_entry vx_virtual_stuff[] = {
+	INF("info",	S_IRUGO, virtual_info),
+	INF("status",	S_IRUGO, virtual_status),
+	DIR(NULL,	S_IRUGO | S_IXUGO, xid),
+};
+
+
+static struct dentry *proc_virtual_lookup(struct inode *dir,
+	struct dentry *dentry, struct nameidata *nd)
+{
+	struct vs_entry *p = vx_virtual_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+	int id = 0;
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (p->name)
+		goto instantiate;
+
+	id = atovid(dentry->d_name.name, dentry->d_name.len);
+	if ((id < 0) || !xid_is_hashed(id))
+		goto out;
+
+instantiate:
+	error = proc_xid_instantiate(dir, dentry, id, p);
+out:
+	return error;
+}
+
+static struct file_operations proc_nid_file_operations = {
+	.read =		generic_read_dir,
+	.readdir =	proc_nid_readdir,
+};
+
+static struct inode_operations proc_nid_inode_operations = {
+	.lookup =	proc_nid_lookup,
+};
+
+static struct vs_entry nx_virtnet_stuff[] = {
+	INF("info",	S_IRUGO, virtnet_info),
+	INF("status",	S_IRUGO, virtnet_status),
+	DIR(NULL,	S_IRUGO | S_IXUGO, nid),
+};
+
+
+static struct dentry *proc_virtnet_lookup(struct inode *dir,
+	struct dentry *dentry, struct nameidata *nd)
+{
+	struct vs_entry *p = nx_virtnet_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+	int id = 0;
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (p->name)
+		goto instantiate;
+
+	id = atovid(dentry->d_name.name, dentry->d_name.len);
+	if ((id < 0) || !nid_is_hashed(id))
+		goto out;
+
+instantiate:
+	error = proc_nid_instantiate(dir, dentry, id, p);
+out:
+	return error;
+}
+
+
+#define PROC_MAXVIDS 32
+
+int proc_virtual_readdir(struct file *filp,
+	void *dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = vx_virtual_stuff;
+	int size = sizeof(vx_virtual_stuff) / sizeof(struct vs_entry);
+	int pos, index;
+	unsigned int xid_array[PROC_MAXVIDS];
+	char buf[PROC_NUMBUF];
+	unsigned int nr_xids, i;
+	u64 ino;
+
+	pos = filp->f_pos;
+	switch (pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	default:
+		index = pos - 2;
+		if (index >= size)
+			goto entries;
+		for (p += index; p->name; p++) {
+			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+				vs_proc_instantiate, 0, p))
+				goto out;
+			pos++;
+		}
+	entries:
+		index = pos - size;
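+		/* the numeric xid entries reuse the trailing DIR() template */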
+		p = &vx_virtual_stuff[size - 1];
+		nr_xids = get_xid_list(index, xid_array, PROC_MAXVIDS);
+		for (i = 0; i < nr_xids; i++) {
+			int n, xid = xid_array[i];
+			unsigned int j = PROC_NUMBUF;
+
+			n = xid;
+			do
+				buf[--j] = '0' + (n % 10);
+			while (n /= 10);
+
+			if (proc_fill_cache(filp, dirent, filldir,
+				buf + j, PROC_NUMBUF - j,
+				vs_proc_instantiate, xid, p))
+				goto out;
+			pos++;
+		}
+	}
+out:
+	filp->f_pos = pos;
+	return 0;
+}
+
+static int proc_virtual_getattr(struct vfsmount *mnt,
+	struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+
+	generic_fillattr(inode, stat);
+	stat->nlink = 2 + atomic_read(&vx_global_cactive);
+	return 0;
+}
+
+static struct file_operations proc_virtual_dir_operations = {
+	.read =		generic_read_dir,
+	.readdir =	proc_virtual_readdir,
+};
+
+static struct inode_operations proc_virtual_dir_inode_operations = {
+	.getattr =	proc_virtual_getattr,
+	.lookup =	proc_virtual_lookup,
+};
+
+
+
+
+
+int proc_virtnet_readdir(struct file *filp,
+	void *dirent, filldir_t filldir)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = nx_virtnet_stuff;
+	int size = sizeof(nx_virtnet_stuff) / sizeof(struct vs_entry);
+	int pos, index;
+	unsigned int nid_array[PROC_MAXVIDS];
+	char buf[PROC_NUMBUF];
+	unsigned int nr_nids, i;
+	u64 ino;
+
+	pos = filp->f_pos;
+	switch (pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+			goto out;
+		pos++;
+		/* fall through */
+	default:
+		index = pos - 2;
+		if (index >= size)
+			goto entries;
+		for (p += index; p->name; p++) {
+			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+				vs_proc_instantiate, 0, p))
+				goto out;
+			pos++;
+		}
+	entries:
+		index = pos - size;
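+		/* the numeric nid entries reuse the trailing DIR() template */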
+		p = &nx_virtnet_stuff[size - 1];
+		nr_nids = get_nid_list(index, nid_array, PROC_MAXVIDS);
+		for (i = 0; i < nr_nids; i++) {
+			int n, nid = nid_array[i];
+			unsigned int j = PROC_NUMBUF;
+
+			n = nid;
+			do
+				buf[--j] = '0' + (n % 10);
+			while (n /= 10);
+
+			if (proc_fill_cache(filp, dirent, filldir,
+				buf + j, PROC_NUMBUF - j,
+				vs_proc_instantiate, nid, p))
+				goto out;
+			pos++;
+		}
+	}
+out:
+	filp->f_pos = pos;
+	return 0;
+}
+
+static int proc_virtnet_getattr(struct vfsmount *mnt,
+	struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+
+	generic_fillattr(inode, stat);
+	stat->nlink = 2 + atomic_read(&nx_global_cactive);
+	return 0;
+}
+
+static struct file_operations proc_virtnet_dir_operations = {
+	.read =		generic_read_dir,
+	.readdir =	proc_virtnet_readdir,
+};
+
+static struct inode_operations proc_virtnet_dir_inode_operations = {
+	.getattr =	proc_virtnet_getattr,
+	.lookup =	proc_virtnet_lookup,
+};
+
+
+
+void proc_vx_init(void)
+{
+	struct proc_dir_entry *ent;
+
+	ent = proc_mkdir("virtual", 0);
+	if (ent) {
+		ent->proc_fops = &proc_virtual_dir_operations;
+		ent->proc_iops = &proc_virtual_dir_inode_operations;
+	}
+	proc_virtual = ent;
+
+	ent = proc_mkdir("virtnet", 0);
+	if (ent) {
+		ent->proc_fops = &proc_virtnet_dir_operations;
+		ent->proc_iops = &proc_virtnet_dir_inode_operations;
+	}
+	proc_virtnet = ent;
+}
+
+
+
+
+/* per pid info */
+
+
+int proc_pid_vx_info(struct task_struct *p, char *buffer)
+{
+	struct vx_info *vxi;
+	char *orig = buffer;
+
+	buffer += sprintf(buffer, "XID:\t%d\n", vx_task_xid(p));
+
+	vxi = task_get_vx_info(p);
+	if (!vxi)
+		goto out;
+
+	buffer += sprintf(buffer, "BCaps:\t");
+	buffer = print_cap_t(buffer, &vxi->vx_bcaps);
+	buffer += sprintf(buffer, "\n");
+	buffer += sprintf(buffer, "CCaps:\t%016llx\n",
+		(unsigned long long)vxi->vx_ccaps);
+	buffer += sprintf(buffer, "CFlags:\t%016llx\n",
+		(unsigned long long)vxi->vx_flags);
+	buffer += sprintf(buffer, "CIPid:\t%d\n", vxi->vx_initpid);
+
+	put_vx_info(vxi);
+out:
+	return buffer - orig;
+}
+
+
+int proc_pid_nx_info(struct task_struct *p, char *buffer)
+{
+	struct nx_info *nxi;
+	struct nx_addr_v4 *v4a;
+#ifdef	CONFIG_IPV6
+	struct nx_addr_v6 *v6a;
+#endif
+	char *orig = buffer;
+	int i;
+
+	buffer += sprintf(buffer, "NID:\t%d\n", nx_task_nid(p));
+
+	nxi = task_get_nx_info(p);
+	if (!nxi)
+		goto out;
+
+	buffer += sprintf(buffer, "NCaps:\t%016llx\n",
+		(unsigned long long)nxi->nx_ncaps);
+	buffer += sprintf(buffer, "NFlags:\t%016llx\n",
+		(unsigned long long)nxi->nx_flags);
+
+	buffer += sprintf(buffer,
+		"V4Root[bcast]:\t" NIPQUAD_FMT "\n",
+		NIPQUAD(nxi->v4_bcast.s_addr));
+	buffer += sprintf(buffer,
+		"V4Root[lback]:\t" NIPQUAD_FMT "\n",
+		NIPQUAD(nxi->v4_lback.s_addr));
+	if (!NX_IPV4(nxi))
+		goto skip_v4;
+	for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
+		buffer += sprintf(buffer, "V4Root[%d]:\t" NXAV4_FMT "\n",
+			i, NXAV4(v4a));
+skip_v4:
+#ifdef	CONFIG_IPV6
+	if (!NX_IPV6(nxi))
+		goto skip_v6;
+	for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
+		buffer += sprintf(buffer, "V6Root[%d]:\t" NXAV6_FMT "\n",
+			i, NXAV6(v6a));
+skip_v6:
+#endif
+	put_nx_info(nxi);
+out:
+	return buffer - orig;
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/sched.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sched.c
--- linux-2.6.32.17/kernel/vserver/sched.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sched.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,414 @@
+/*
+ *  linux/kernel/vserver/sched.c
+ *
+ *  Virtual Server: Scheduler Support
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  adapted Sam Vilains version to 2.6.3
+ *  V0.02  removed legacy interface
+ *  V0.03  changed vcmds to vxi arg
+ *  V0.04  removed older and legacy interfaces
+ *
+ */
+
+#include <linux/vs_context.h>
+#include <linux/vs_sched.h>
+#include <linux/vserver/sched_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+#define vxd_check_range(val, min, max) do {		\
+	vxlprintk((val < min) || (val > max),		\
+		"check_range(%ld,%ld,%ld)",		\
+		(long)val, (long)min, (long)max,	\
+		__FILE__, __LINE__);			\
+	} while (0)
+
+
+void vx_update_sched_param(struct _vx_sched *sched,
+	struct _vx_sched_pc *sched_pc)
+{
+	unsigned int set_mask = sched->update_mask;
+
+	if (set_mask & VXSM_FILL_RATE)
+		sched_pc->fill_rate[0] = sched->fill_rate[0];
+	if (set_mask & VXSM_INTERVAL)
+		sched_pc->interval[0] = sched->interval[0];
+	if (set_mask & VXSM_FILL_RATE2)
+		sched_pc->fill_rate[1] = sched->fill_rate[1];
+	if (set_mask & VXSM_INTERVAL2)
+		sched_pc->interval[1] = sched->interval[1];
+	if (set_mask & VXSM_TOKENS)
+		sched_pc->tokens = sched->tokens;
+	if (set_mask & VXSM_TOKENS_MIN)
+		sched_pc->tokens_min = sched->tokens_min;
+	if (set_mask & VXSM_TOKENS_MAX)
+		sched_pc->tokens_max = sched->tokens_max;
+	if (set_mask & VXSM_PRIO_BIAS)
+		sched_pc->prio_bias = sched->prio_bias;
+
+	if (set_mask & VXSM_IDLE_TIME)
+		sched_pc->flags |= VXSF_IDLE_TIME;
+	else
+		sched_pc->flags &= ~VXSF_IDLE_TIME;
+
+	/* reset time */
+	sched_pc->norm_time = jiffies;
+}
+
+
+/*
+ * recalculate the context's scheduling tokens
+ *
+ * ret > 0 : number of tokens available
+ * ret < 0 : on hold, check delta_min[]
+ *	     -1 only jiffies
+ *	     -2 also idle time
+ *
+ */
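+/*
+ * example, with the per-cpu defaults from sched_init.h
+ * (fill_rate[0] = 1, interval[0] = 4): a delta of 10 jiffies
+ * yields 10/4 * 1 = 2 new tokens and, with HARDCPU enabled,
+ * leaves delta_min[0] = 10 - 2*4 = 2
+ */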
+int vx_tokens_recalc(struct _vx_sched_pc *sched_pc,
+	unsigned long *norm_time, unsigned long *idle_time, int delta_min[2])
+{
+	long delta;
+	long tokens = 0;
+	int flags = sched_pc->flags;
+
+	/* how much time did pass? */
+	delta = *norm_time - sched_pc->norm_time;
+	// printk("@ %ld, %ld, %ld\n", *norm_time, sched_pc->norm_time, jiffies);
+	vxd_check_range(delta, 0, INT_MAX);
+
+	if (delta >= sched_pc->interval[0]) {
+		long tokens, integral;
+
+		/* calc integral token part */
+		tokens = delta / sched_pc->interval[0];
+		integral = tokens * sched_pc->interval[0];
+		tokens *= sched_pc->fill_rate[0];
+#ifdef	CONFIG_VSERVER_HARDCPU
+		delta_min[0] = delta - integral;
+		vxd_check_range(delta_min[0], 0, sched_pc->interval[0]);
+#endif
+		/* advance time */
+		sched_pc->norm_time += delta;
+
+		/* add tokens */
+		sched_pc->tokens += tokens;
+		sched_pc->token_time += tokens;
+	} else
+		delta_min[0] = delta;
+
+#ifdef	CONFIG_VSERVER_IDLETIME
+	if (!(flags & VXSF_IDLE_TIME))
+		goto skip_idle;
+
+	/* how much was the idle skip? */
+	delta = *idle_time - sched_pc->idle_time;
+	vxd_check_range(delta, 0, INT_MAX);
+
+	if (delta >= sched_pc->interval[1]) {
+		long tokens, integral;
+
+		/* calc fair share token part */
+		tokens = delta / sched_pc->interval[1];
+		integral = tokens * sched_pc->interval[1];
+		tokens *= sched_pc->fill_rate[1];
+		delta_min[1] = delta - integral;
+		vxd_check_range(delta_min[1], 0, sched_pc->interval[1]);
+
+		/* advance idle time */
+		sched_pc->idle_time += integral;
+
+		/* add tokens */
+		sched_pc->tokens += tokens;
+		sched_pc->token_time += tokens;
+	} else
+		delta_min[1] = delta;
+skip_idle:
+#endif
+
+	/* clip at maximum */
+	if (sched_pc->tokens > sched_pc->tokens_max)
+		sched_pc->tokens = sched_pc->tokens_max;
+	tokens = sched_pc->tokens;
+
+	if ((flags & VXSF_ONHOLD)) {
+		/* can we unhold? */
+		if (tokens >= sched_pc->tokens_min) {
+			flags &= ~VXSF_ONHOLD;
+			sched_pc->hold_ticks +=
+				*norm_time - sched_pc->onhold;
+		} else
+			goto on_hold;
+	} else {
+		/* put on hold? */
+		if (tokens <= 0) {
+			flags |= VXSF_ONHOLD;
+			sched_pc->onhold = *norm_time;
+			goto on_hold;
+		}
+	}
+	sched_pc->flags = flags;
+	return tokens;
+
+on_hold:
+	tokens = sched_pc->tokens_min - tokens;
+	sched_pc->flags = flags;
+	// BUG_ON(tokens < 0); probably doesn't hold anymore
+
+#ifdef	CONFIG_VSERVER_HARDCPU
+	/* next interval? */
+	if (!sched_pc->fill_rate[0])
+		delta_min[0] = HZ;
+	else if (tokens > sched_pc->fill_rate[0])
+		delta_min[0] += sched_pc->interval[0] *
+			tokens / sched_pc->fill_rate[0];
+	else
+		delta_min[0] = sched_pc->interval[0] - delta_min[0];
+	vxd_check_range(delta_min[0], 0, INT_MAX);
+
+#ifdef	CONFIG_VSERVER_IDLETIME
+	if (!(flags & VXSF_IDLE_TIME))
+		return -1;
+
+	/* next interval? */
+	if (!sched_pc->fill_rate[1])
+		delta_min[1] = HZ;
+	else if (tokens > sched_pc->fill_rate[1])
+		delta_min[1] += sched_pc->interval[1] *
+			tokens / sched_pc->fill_rate[1];
+	else
+		delta_min[1] = sched_pc->interval[1] - delta_min[1];
+	vxd_check_range(delta_min[1], 0, INT_MAX);
+
+	return -2;
+#else
+	return -1;
+#endif /* CONFIG_VSERVER_IDLETIME */
+#else
+	return 0;
+#endif /* CONFIG_VSERVER_HARDCPU */
+}
+
+static inline unsigned long msec_to_ticks(unsigned long msec)
+{
+	return msecs_to_jiffies(msec);
+}
+
+static inline unsigned long ticks_to_msec(unsigned long ticks)
+{
+	return jiffies_to_msecs(ticks);
+}
+
+static inline unsigned long ticks_to_usec(unsigned long ticks)
+{
+	return jiffies_to_usecs(ticks);
+}
+
+
+static int do_set_sched(struct vx_info *vxi, struct vcmd_sched_v5 *data)
+{
+	unsigned int set_mask = data->mask;
+	unsigned int update_mask;
+	int i, cpu;
+
+	/* Sanity check data values */
+	if (data->tokens_max <= 0)
+		data->tokens_max = HZ;
+	if (data->tokens_min < 0)
+		data->tokens_min = HZ / 3;
+	if (data->tokens_min >= data->tokens_max)
+		data->tokens_min = data->tokens_max;
+
+	if (data->prio_bias > MAX_PRIO_BIAS)
+		data->prio_bias = MAX_PRIO_BIAS;
+	if (data->prio_bias < MIN_PRIO_BIAS)
+		data->prio_bias = MIN_PRIO_BIAS;
+
+	spin_lock(&vxi->sched.tokens_lock);
+
+	/* sync up on delayed updates */
+	for_each_cpu_mask(cpu, vxi->sched.update)
+		vx_update_sched_param(&vxi->sched,
+			&vx_per_cpu(vxi, sched_pc, cpu));
+
+	if (set_mask & VXSM_FILL_RATE)
+		vxi->sched.fill_rate[0] = data->fill_rate[0];
+	if (set_mask & VXSM_FILL_RATE2)
+		vxi->sched.fill_rate[1] = data->fill_rate[1];
+	if (set_mask & VXSM_INTERVAL)
+		vxi->sched.interval[0] = (set_mask & VXSM_MSEC) ?
+			msec_to_ticks(data->interval[0]) : data->interval[0];
+	if (set_mask & VXSM_INTERVAL2)
+		vxi->sched.interval[1] = (set_mask & VXSM_MSEC) ?
+			msec_to_ticks(data->interval[1]) : data->interval[1];
+	if (set_mask & VXSM_TOKENS)
+		vxi->sched.tokens = data->tokens;
+	if (set_mask & VXSM_TOKENS_MIN)
+		vxi->sched.tokens_min = data->tokens_min;
+	if (set_mask & VXSM_TOKENS_MAX)
+		vxi->sched.tokens_max = data->tokens_max;
+	if (set_mask & VXSM_PRIO_BIAS)
+		vxi->sched.prio_bias = data->prio_bias;
+
+	/* Sanity check rate/interval */
+	for (i = 0; i < 2; i++) {
+		if (data->fill_rate[i] < 0)
+			data->fill_rate[i] = 0;
+		if (data->interval[i] <= 0)
+			data->interval[i] = HZ;
+	}
+
+	update_mask = vxi->sched.update_mask & VXSM_SET_MASK;
+	update_mask |= (set_mask & (VXSM_SET_MASK | VXSM_IDLE_TIME));
+	vxi->sched.update_mask = update_mask;
+
+#ifdef	CONFIG_SMP
+	rmb();
+	if (set_mask & VXSM_CPU_ID) {
+		vxi->sched.update = cpumask_of_cpu(data->cpu_id);
+		cpus_and(vxi->sched.update, cpu_online_map,
+			vxi->sched.update);
+	} else
+		vxi->sched.update = cpu_online_map;
+
+	/* forced reload? */
+	if (set_mask & VXSM_FORCE) {
+		for_each_cpu_mask(cpu, vxi->sched.update)
+			vx_update_sched_param(&vxi->sched,
+				&vx_per_cpu(vxi, sched_pc, cpu));
+		vxi->sched.update = CPU_MASK_NONE;
+	}
+#else
+	/* on UP we update immediately */
+	vx_update_sched_param(&vxi->sched,
+		&vx_per_cpu(vxi, sched_pc, 0));
+#endif
+
+	spin_unlock(&vxi->sched.tokens_lock);
+	return 0;
+}
+
+
+#define COPY_IDS(C) C(cpu_id); C(bucket_id)
+#define COPY_PRI(C) C(prio_bias)
+#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
+#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]);	\
+		    C(fill_rate[1]); C(interval[1]);
+
+#define COPY_VALUE(name) vc_data.name = data->name
+
+static int do_set_sched_v4(struct vx_info *vxi, struct vcmd_set_sched_v4 *data)
+{
+	struct vcmd_sched_v5 vc_data;
+
+	vc_data.mask = data->set_mask;
+	COPY_IDS(COPY_VALUE);
+	COPY_PRI(COPY_VALUE);
+	COPY_TOK(COPY_VALUE);
+	vc_data.fill_rate[0] = vc_data.fill_rate[1] = data->fill_rate;
+	vc_data.interval[0] = vc_data.interval[1] = data->interval;
+	return do_set_sched(vxi, &vc_data);
+}
+
+int vc_set_sched_v4(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_sched_v4 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_sched_v4(vxi, &vc_data);
+}
+
+/* latest interface is v5 */
+
+int vc_set_sched(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_sched_v5 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_sched(vxi, &vc_data);
+}
+
+
+#define COPY_PRI(C) C(prio_bias)
+#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
+#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]);    \
+		    C(fill_rate[1]); C(interval[1]);
+
+#define COPY_VALUE(name) vc_data.name = data->name
+
+
+int vc_get_sched(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_sched_v5 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
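+	/* in both branches the local 'data' intentionally shadows the
+	   __user pointer, so COPY_VALUE() reads from kernel state */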
+	if (vc_data.mask & VXSM_CPU_ID) {
+		int cpu = vc_data.cpu_id;
+		struct _vx_sched_pc *data;
+
+		if (!cpu_possible(cpu))
+			return -EINVAL;
+
+		data = &vx_per_cpu(vxi, sched_pc, cpu);
+		COPY_TOK(COPY_VALUE);
+		COPY_PRI(COPY_VALUE);
+		COPY_FRI(COPY_VALUE);
+
+		if (data->flags & VXSF_IDLE_TIME)
+			vc_data.mask |= VXSM_IDLE_TIME;
+	} else {
+		struct _vx_sched *data = &vxi->sched;
+
+		COPY_TOK(COPY_VALUE);
+		COPY_PRI(COPY_VALUE);
+		COPY_FRI(COPY_VALUE);
+	}
+
+	if (vc_data.mask & VXSM_MSEC) {
+		vc_data.interval[0] = ticks_to_msec(vc_data.interval[0]);
+		vc_data.interval[1] = ticks_to_msec(vc_data.interval[1]);
+	}
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+int vc_sched_info(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_sched_info vc_data;
+	int cpu;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	cpu = vc_data.cpu_id;
+	if (!cpu_possible(cpu))
+		return -EINVAL;
+
+	if (vxi) {
+		struct _vx_sched_pc *sched_pc =
+			&vx_per_cpu(vxi, sched_pc, cpu);
+
+		vc_data.user_msec = ticks_to_msec(sched_pc->user_ticks);
+		vc_data.sys_msec = ticks_to_msec(sched_pc->sys_ticks);
+		vc_data.hold_msec = ticks_to_msec(sched_pc->hold_ticks);
+		vc_data.vavavoom = sched_pc->vavavoom;
+	}
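+	/* report the length of one scheduler tick in microseconds */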
+	vc_data.token_usec = ticks_to_usec(1);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/sched_init.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sched_init.h
--- linux-2.6.32.17/kernel/vserver/sched_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sched_init.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,50 @@
+
+static inline void vx_info_init_sched(struct _vx_sched *sched)
+{
+	static struct lock_class_key tokens_lock_key;
+
+	/* scheduling; hard-code starting values as constants */
+	sched->fill_rate[0]	= 1;
+	sched->interval[0]	= 4;
+	sched->fill_rate[1]	= 1;
+	sched->interval[1]	= 8;
+	sched->tokens		= HZ >> 2;
+	sched->tokens_min	= HZ >> 4;
+	sched->tokens_max	= HZ >> 1;
+	sched->tokens_lock	= SPIN_LOCK_UNLOCKED;
+	sched->prio_bias	= 0;
+
+	lockdep_set_class(&sched->tokens_lock, &tokens_lock_key);
+}
+
+static inline
+void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
+{
+	sched_pc->fill_rate[0]	= 1;
+	sched_pc->interval[0]	= 4;
+	sched_pc->fill_rate[1]	= 1;
+	sched_pc->interval[1]	= 8;
+	sched_pc->tokens	= HZ >> 2;
+	sched_pc->tokens_min	= HZ >> 4;
+	sched_pc->tokens_max	= HZ >> 1;
+	sched_pc->prio_bias	= 0;
+	sched_pc->vavavoom	= 0;
+	sched_pc->token_time	= 0;
+	sched_pc->idle_time	= 0;
+	sched_pc->norm_time	= jiffies;
+
+	sched_pc->user_ticks = 0;
+	sched_pc->sys_ticks = 0;
+	sched_pc->hold_ticks = 0;
+}
+
+static inline void vx_info_exit_sched(struct _vx_sched *sched)
+{
+	return;
+}
+
+static inline
+void vx_info_exit_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
+{
+	return;
+}
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/sched_proc.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sched_proc.h
--- linux-2.6.32.17/kernel/vserver/sched_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sched_proc.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,57 @@
+#ifndef _VX_SCHED_PROC_H
+#define _VX_SCHED_PROC_H
+
+
+static inline
+int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
+{
+	int length = 0;
+
+	length += sprintf(buffer,
+		"FillRate:\t%8d,%d\n"
+		"Interval:\t%8d,%d\n"
+		"TokensMin:\t%8d\n"
+		"TokensMax:\t%8d\n"
+		"PrioBias:\t%8d\n",
+		sched->fill_rate[0],
+		sched->fill_rate[1],
+		sched->interval[0],
+		sched->interval[1],
+		sched->tokens_min,
+		sched->tokens_max,
+		sched->prio_bias);
+	return length;
+}
+
+static inline
+int vx_info_proc_sched_pc(struct _vx_sched_pc *sched_pc,
+	char *buffer, int cpu)
+{
+	int length = 0;
+
+	length += sprintf(buffer + length,
+		"cpu %d: %lld %lld %lld %ld %ld", cpu,
+		(unsigned long long)sched_pc->user_ticks,
+		(unsigned long long)sched_pc->sys_ticks,
+		(unsigned long long)sched_pc->hold_ticks,
+		sched_pc->token_time,
+		sched_pc->idle_time);
+	length += sprintf(buffer + length,
+		" %c%c %d %d %d %d/%d %d/%d",
+		(sched_pc->flags & VXSF_ONHOLD) ? 'H' : 'R',
+		(sched_pc->flags & VXSF_IDLE_TIME) ? 'I' : '-',
+		sched_pc->tokens,
+		sched_pc->tokens_min,
+		sched_pc->tokens_max,
+		sched_pc->fill_rate[0],
+		sched_pc->interval[0],
+		sched_pc->fill_rate[1],
+		sched_pc->interval[1]);
+	length += sprintf(buffer + length,
+		" %d %d\n",
+		sched_pc->prio_bias,
+		sched_pc->vavavoom);
+	return length;
+}
+
+#endif	/* _VX_SCHED_PROC_H */
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/signal.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/signal.c
--- linux-2.6.32.17/kernel/vserver/signal.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/signal.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,132 @@
+/*
+ *  linux/kernel/vserver/signal.c
+ *
+ *  Virtual Server: Signal Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  broken out from vcontext V0.05
+ *  V0.02  changed vcmds to vxi arg
+ *  V0.03  adjusted siginfo for kill
+ *
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/signal_cmd.h>
+
+
+int vx_info_kill(struct vx_info *vxi, int pid, int sig)
+{
+	int retval, count = 0;
+	struct task_struct *p;
+	struct siginfo *sip = SEND_SIG_PRIV;
+
+	retval = -ESRCH;
+	vxdprintk(VXD_CBIT(misc, 4),
+		"vx_info_kill(%p[#%d],%d,%d)*",
+		vxi, vxi->vx_id, pid, sig);
+	read_lock(&tasklist_lock);
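+	/* pid 0 signals every task in the context (including its init),
+	   pid -1 signals every task except the context init process */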
+	switch (pid) {
+	case  0:
+	case -1:
+		for_each_process(p) {
+			int err = 0;
+
+			if (vx_task_xid(p) != vxi->vx_id || p->pid <= 1 ||
+				(pid && vxi->vx_initpid == p->pid))
+				continue;
+
+			err = group_send_sig_info(sig, sip, p);
+			++count;
+			if (err != -EPERM)
+				retval = err;
+		}
+		break;
+
+	case 1:
+		if (vxi->vx_initpid) {
+			pid = vxi->vx_initpid;
+			/* for now, only SIGINT to private init ... */
+			if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
+				/* ... as long as there are tasks left */
+				(atomic_read(&vxi->vx_tasks) > 1))
+				sig = SIGINT;
+		}
+		/* fallthrough */
+	default:
+		p = find_task_by_real_pid(pid);
+		if (p) {
+			if (vx_task_xid(p) == vxi->vx_id)
+				retval = group_send_sig_info(sig, sip, p);
+		}
+		break;
+	}
+	read_unlock(&tasklist_lock);
+	vxdprintk(VXD_CBIT(misc, 4),
+		"vx_info_kill(%p[#%d],%d,%d,%ld) = %d",
+		vxi, vxi->vx_id, pid, sig, (long)sip, retval);
+	return retval;
+}
+
+int vc_ctx_kill(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_kill_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	/* special check to allow guest shutdown */
+	if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
+		/* forbid killall pid=0 when init is present */
+		(((vc_data.pid < 1) && vxi->vx_initpid) ||
+		(vc_data.pid > 1)))
+		return -EACCES;
+
+	return vx_info_kill(vxi, vc_data.pid, vc_data.sig);
+}
+
+
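+/* sleep until the context reaches shutdown state or a signal is pending */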
+static int __wait_exit(struct vx_info *vxi)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	int ret = 0;
+
+	add_wait_queue(&vxi->vx_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+wait:
+	if (vx_info_state(vxi,
+		VXS_SHUTDOWN | VXS_HASHED | VXS_HELPER) == VXS_SHUTDOWN)
+		goto out;
+	if (signal_pending(current)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+	schedule();
+	goto wait;
+
+out:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&vxi->vx_wait, &wait);
+	return ret;
+}
+
+
+
+int vc_wait_exit(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_wait_exit_v0 vc_data;
+	int ret;
+
+	ret = __wait_exit(vxi);
+	vc_data.reboot_cmd = vxi->reboot_cmd;
+	vc_data.exit_code = vxi->exit_code;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/space.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/space.c
--- linux-2.6.32.17/kernel/vserver/space.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/space.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,375 @@
+/*
+ *  linux/kernel/vserver/space.c
+ *
+ *  Virtual Server: Context Space Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  broken out from context.c 0.07
+ *  V0.02  added task locking for namespace
+ *  V0.03  broken out vx_enter_namespace
+ *  V0.04  added *space support and commands
+ *
+ */
+
+#include <linux/utsname.h>
+#include <linux/nsproxy.h>
+#include <linux/err.h>
+#include <linux/fs_struct.h>
+#include <asm/uaccess.h>
+
+#include <linux/vs_context.h>
+#include <linux/vserver/space.h>
+#include <linux/vserver/space_cmd.h>
+
+atomic_t vs_global_nsproxy	= ATOMIC_INIT(0);
+atomic_t vs_global_fs		= ATOMIC_INIT(0);
+atomic_t vs_global_mnt_ns	= ATOMIC_INIT(0);
+atomic_t vs_global_uts_ns	= ATOMIC_INIT(0);
+atomic_t vs_global_user_ns	= ATOMIC_INIT(0);
+atomic_t vs_global_pid_ns	= ATOMIC_INIT(0);
+
+
+/* namespace functions */
+
+#include <linux/mnt_namespace.h>
+#include <linux/user_namespace.h>
+#include <linux/pid_namespace.h>
+#include <linux/ipc_namespace.h>
+#include <net/net_namespace.h>
+
+
+static const struct vcmd_space_mask_v1 space_mask_v0 = {
+	.mask = CLONE_FS |
+		CLONE_NEWNS |
+		CLONE_NEWUTS |
+		CLONE_NEWIPC |
+		CLONE_NEWUSER |
+		0
+};
+
+static const struct vcmd_space_mask_v1 space_mask = {
+	.mask = CLONE_FS |
+		CLONE_NEWNS |
+		CLONE_NEWUTS |
+		CLONE_NEWIPC |
+		CLONE_NEWUSER |
+#ifdef	CONFIG_PID_NS
+		CLONE_NEWPID |
+#endif
+#ifdef	CONFIG_NET_NS
+		CLONE_NEWNET |
+#endif
+		0
+};
+
+static const struct vcmd_space_mask_v1 default_space_mask = {
+	.mask = CLONE_FS |
+		CLONE_NEWNS |
+		CLONE_NEWUTS |
+		CLONE_NEWIPC |
+		CLONE_NEWUSER |
+#ifdef	CONFIG_PID_NS
+//		CLONE_NEWPID |
+#endif
+		0
+};
+
+/*
+ *	build a new nsproxy mix
+ *	assumes that both input proxies are 'const'
+ *	does not touch the refcount of either input nsproxy
+ *	will hold a reference on the result.
+ */
+
+struct nsproxy *vs_mix_nsproxy(struct nsproxy *old_nsproxy,
+	struct nsproxy *new_nsproxy, unsigned long mask)
+{
+	struct mnt_namespace *old_ns;
+	struct uts_namespace *old_uts;
+	struct ipc_namespace *old_ipc;
+#ifdef	CONFIG_PID_NS
+	struct pid_namespace *old_pid;
+#endif
+#ifdef	CONFIG_NET_NS
+	struct net *old_net;
+#endif
+	struct nsproxy *nsproxy;
+
+	nsproxy = copy_nsproxy(old_nsproxy);
+	if (!nsproxy)
+		goto out;
+
+	if (mask & CLONE_NEWNS) {
+		old_ns = nsproxy->mnt_ns;
+		nsproxy->mnt_ns = new_nsproxy->mnt_ns;
+		if (nsproxy->mnt_ns)
+			get_mnt_ns(nsproxy->mnt_ns);
+	} else
+		old_ns = NULL;
+
+	if (mask & CLONE_NEWUTS) {
+		old_uts = nsproxy->uts_ns;
+		nsproxy->uts_ns = new_nsproxy->uts_ns;
+		if (nsproxy->uts_ns)
+			get_uts_ns(nsproxy->uts_ns);
+	} else
+		old_uts = NULL;
+
+	if (mask & CLONE_NEWIPC) {
+		old_ipc = nsproxy->ipc_ns;
+		nsproxy->ipc_ns = new_nsproxy->ipc_ns;
+		if (nsproxy->ipc_ns)
+			get_ipc_ns(nsproxy->ipc_ns);
+	} else
+		old_ipc = NULL;
+
+#ifdef	CONFIG_PID_NS
+	if (mask & CLONE_NEWPID) {
+		old_pid = nsproxy->pid_ns;
+		nsproxy->pid_ns = new_nsproxy->pid_ns;
+		if (nsproxy->pid_ns)
+			get_pid_ns(nsproxy->pid_ns);
+	} else
+		old_pid = NULL;
+#endif
+#ifdef	CONFIG_NET_NS
+	if (mask & CLONE_NEWNET) {
+		old_net = nsproxy->net_ns;
+		nsproxy->net_ns = new_nsproxy->net_ns;
+		if (nsproxy->net_ns)
+			get_net(nsproxy->net_ns);
+	} else
+		old_net = NULL;
+#endif
+	if (old_ns)
+		put_mnt_ns(old_ns);
+	if (old_uts)
+		put_uts_ns(old_uts);
+	if (old_ipc)
+		put_ipc_ns(old_ipc);
+#ifdef	CONFIG_PID_NS
+	if (old_pid)
+		put_pid_ns(old_pid);
+#endif
+#ifdef	CONFIG_NET_NS
+	if (old_net)
+		put_net(old_net);
+#endif
+out:
+	return nsproxy;
+}
+
+
+/*
+ *	merge two nsproxy structs into a new one.
+ *	will hold a reference on the result.
+ */
+
+static inline
+struct nsproxy *__vs_merge_nsproxy(struct nsproxy *old,
+	struct nsproxy *proxy, unsigned long mask)
+{
+	struct nsproxy null_proxy = { .mnt_ns = NULL };
+
+	if (!proxy)
+		return NULL;
+
+	if (mask) {
+		/* vs_mix_nsproxy returns with reference */
+		return vs_mix_nsproxy(old ? old : &null_proxy,
+			proxy, mask);
+	}
+	get_nsproxy(proxy);
+	return proxy;
+}
+
+
+int vx_enter_space(struct vx_info *vxi, unsigned long mask, unsigned index)
+{
+	struct nsproxy *proxy, *proxy_cur, *proxy_new;
+	struct fs_struct *fs_cur, *fs = NULL;
+	int ret, kill = 0;
+
+	vxdprintk(VXD_CBIT(space, 8), "vx_enter_space(%p[#%u],0x%08lx,%d)",
+		vxi, vxi->vx_id, mask, index);
+
+	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
+		return -EACCES;
+
+	if (!mask)
+		mask = vxi->vx_nsmask[index];
+
+	if ((mask & vxi->vx_nsmask[index]) != mask)
+		return -EINVAL;
+
+	if (mask & CLONE_FS) {
+		fs = copy_fs_struct(vxi->vx_fs[index]);
+		if (!fs)
+			return -ENOMEM;
+	}
+	proxy = vxi->vx_nsproxy[index];
+
+	vxdprintk(VXD_CBIT(space, 9),
+		"vx_enter_space(%p[#%u],0x%08lx,%d) -> (%p,%p)",
+		vxi, vxi->vx_id, mask, index, proxy, fs);
+
+	task_lock(current);
+	fs_cur = current->fs;
+
+	if (mask & CLONE_FS) {
+		write_lock(&fs_cur->lock);
+		current->fs = fs;
+		kill = !--fs_cur->users;
+		write_unlock(&fs_cur->lock);
+	}
+
+	proxy_cur = current->nsproxy;
+	get_nsproxy(proxy_cur);
+	task_unlock(current);
+
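+	/* release the old fs_struct if this task was its last user */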
+	if (kill)
+		free_fs_struct(fs_cur);
+
+	proxy_new = __vs_merge_nsproxy(proxy_cur, proxy, mask);
+	if (IS_ERR(proxy_new)) {
+		ret = PTR_ERR(proxy_new);
+		goto out_put;
+	}
+
+	proxy_new = xchg(&current->nsproxy, proxy_new);
+	ret = 0;
+
+	if (proxy_new)
+		put_nsproxy(proxy_new);
+out_put:
+	if (proxy_cur)
+		put_nsproxy(proxy_cur);
+	return ret;
+}
+
+
+int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index)
+{
+	struct nsproxy *proxy_vxi, *proxy_cur, *proxy_new;
+	struct fs_struct *fs_vxi, *fs;
+	int ret, kill = 0;
+
+	vxdprintk(VXD_CBIT(space, 8), "vx_set_space(%p[#%u],0x%08lx,%d)",
+		vxi, vxi->vx_id, mask, index);
+#if 0
+	if (!mask)
+		mask = default_space_mask.mask;
+#endif
+	if ((mask & space_mask.mask) != mask)
+		return -EINVAL;
+
+	proxy_vxi = vxi->vx_nsproxy[index];
+	fs_vxi = vxi->vx_fs[index];
+
+	if (mask & CLONE_FS) {
+		fs = copy_fs_struct(current->fs);
+		if (!fs)
+			return -ENOMEM;
+	}
+
+	task_lock(current);
+
+	if (mask & CLONE_FS) {
+		write_lock(&fs_vxi->lock);
+		vxi->vx_fs[index] = fs;
+		kill = !--fs_vxi->users;
+		write_unlock(&fs_vxi->lock);
+	}
+
+	proxy_cur = current->nsproxy;
+	get_nsproxy(proxy_cur);
+	task_unlock(current);
+
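+	/* release the guest's old fs_struct if it had no other users */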
+	if (kill)
+		free_fs_struct(fs_vxi);
+
+	proxy_new = __vs_merge_nsproxy(proxy_vxi, proxy_cur, mask);
+	if (IS_ERR(proxy_new)) {
+		ret = PTR_ERR(proxy_new);
+		goto out_put;
+	}
+
+	proxy_new = xchg(&vxi->vx_nsproxy[index], proxy_new);
+	vxi->vx_nsmask[index] |= mask;
+	ret = 0;
+
+	if (proxy_new)
+		put_nsproxy(proxy_new);
+out_put:
+	if (proxy_cur)
+		put_nsproxy(proxy_cur);
+	return ret;
+}
+
+
+int vc_enter_space_v1(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return vx_enter_space(vxi, vc_data.mask, 0);
+}
+
+int vc_enter_space(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if (vc_data.index >= VX_SPACES)
+		return -EINVAL;
+
+	return vx_enter_space(vxi, vc_data.mask, vc_data.index);
+}
+
+int vc_set_space_v1(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return vx_set_space(vxi, vc_data.mask, 0);
+}
+
+int vc_set_space(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if (vc_data.index >= VX_SPACES)
+		return -EINVAL;
+
+	return vx_set_space(vxi, vc_data.mask, vc_data.index);
+}
+
+int vc_get_space_mask(void __user *data, int type)
+{
+	const struct vcmd_space_mask_v1 *mask;
+
+	if (type == 0)
+		mask = &space_mask_v0;
+	else if (type == 1)
+		mask = &space_mask;
+	else
+		mask = &default_space_mask;
+
+	vxdprintk(VXD_CBIT(space, 10),
+		"vc_get_space_mask(%d) = %08llx", type,
+		(unsigned long long)mask->mask);
+
+	if (copy_to_user(data, mask, sizeof(*mask)))
+		return -EFAULT;
+	return 0;
+}
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/switch.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/switch.c
--- linux-2.6.32.17/kernel/vserver/switch.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/switch.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,546 @@
+/*
+ *  linux/kernel/vserver/switch.c
+ *
+ *  Virtual Server: Syscall Switch
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  syscall switch
+ *  V0.02  added signal to context
+ *  V0.03  added rlimit functions
+ *  V0.04  added iattr, task/xid functions
+ *  V0.05  added debug/history stuff
+ *  V0.06  added compat32 layer
+ *  V0.07  vcmd args and perms
+ *  V0.08  added status commands
+ *  V0.09  added tag commands
+ *  V0.10  added oom bias
+ *  V0.11  added device commands
+ *
+ */
+
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vserver/switch.h>
+
+#include "vci_config.h"
+
+
+static inline
+int vc_get_version(uint32_t id)
+{
+	return VCI_VERSION;
+}
+
+static inline
+int vc_get_vci(uint32_t id)
+{
+	return vci_kernel_config();
+}
+
+#include <linux/vserver/context_cmd.h>
+#include <linux/vserver/cvirt_cmd.h>
+#include <linux/vserver/cacct_cmd.h>
+#include <linux/vserver/limit_cmd.h>
+#include <linux/vserver/network_cmd.h>
+#include <linux/vserver/sched_cmd.h>
+#include <linux/vserver/debug_cmd.h>
+#include <linux/vserver/inode_cmd.h>
+#include <linux/vserver/dlimit_cmd.h>
+#include <linux/vserver/signal_cmd.h>
+#include <linux/vserver/space_cmd.h>
+#include <linux/vserver/tag_cmd.h>
+#include <linux/vserver/device_cmd.h>
+
+#include <linux/vserver/inode.h>
+#include <linux/vserver/dlimit.h>
+
+
+#ifdef	CONFIG_COMPAT
+#define __COMPAT(name, id, data, compat)	\
+	(compat) ? name ## _x32(id, data) : name(id, data)
+#define __COMPAT_NO_ID(name, data, compat)	\
+	(compat) ? name ## _x32(data) : name(data)
+#else
+#define __COMPAT(name, id, data, compat)	\
+	name(id, data)
+#define __COMPAT_NO_ID(name, data, compat)	\
+	name(data)
+#endif
+
+
+static inline
+long do_vcmd(uint32_t cmd, uint32_t id,
+	struct vx_info *vxi, struct nx_info *nxi,
+	void __user *data, int compat)
+{
+	switch (cmd) {
+
+	case VCMD_get_version:
+		return vc_get_version(id);
+	case VCMD_get_vci:
+		return vc_get_vci(id);
+
+	case VCMD_task_xid:
+		return vc_task_xid(id);
+	case VCMD_vx_info:
+		return vc_vx_info(vxi, data);
+
+	case VCMD_task_nid:
+		return vc_task_nid(id);
+	case VCMD_nx_info:
+		return vc_nx_info(nxi, data);
+
+	case VCMD_task_tag:
+		return vc_task_tag(id);
+
+	case VCMD_set_space_v1:
+		return vc_set_space_v1(vxi, data);
+	/* this is version 2 */
+	case VCMD_set_space:
+		return vc_set_space(vxi, data);
+
+	case VCMD_get_space_mask_v0:
+		return vc_get_space_mask(data, 0);
+	/* this is version 1 */
+	case VCMD_get_space_mask:
+		return vc_get_space_mask(data, 1);
+
+	case VCMD_get_space_default:
+		return vc_get_space_mask(data, -1);
+
+#ifdef	CONFIG_IA32_EMULATION
+	case VCMD_get_rlimit:
+		return __COMPAT(vc_get_rlimit, vxi, data, compat);
+	case VCMD_set_rlimit:
+		return __COMPAT(vc_set_rlimit, vxi, data, compat);
+#else
+	case VCMD_get_rlimit:
+		return vc_get_rlimit(vxi, data);
+	case VCMD_set_rlimit:
+		return vc_set_rlimit(vxi, data);
+#endif
+	case VCMD_get_rlimit_mask:
+		return vc_get_rlimit_mask(id, data);
+	case VCMD_reset_hits:
+		return vc_reset_hits(vxi, data);
+	case VCMD_reset_minmax:
+		return vc_reset_minmax(vxi, data);
+
+	case VCMD_get_vhi_name:
+		return vc_get_vhi_name(vxi, data);
+	case VCMD_set_vhi_name:
+		return vc_set_vhi_name(vxi, data);
+
+	case VCMD_ctx_stat:
+		return vc_ctx_stat(vxi, data);
+	case VCMD_virt_stat:
+		return vc_virt_stat(vxi, data);
+	case VCMD_sock_stat:
+		return vc_sock_stat(vxi, data);
+	case VCMD_rlimit_stat:
+		return vc_rlimit_stat(vxi, data);
+
+	case VCMD_set_cflags:
+		return vc_set_cflags(vxi, data);
+	case VCMD_get_cflags:
+		return vc_get_cflags(vxi, data);
+
+	/* this is version 1 */
+	case VCMD_set_ccaps:
+		return vc_set_ccaps(vxi, data);
+	/* this is version 1 */
+	case VCMD_get_ccaps:
+		return vc_get_ccaps(vxi, data);
+	case VCMD_set_bcaps:
+		return vc_set_bcaps(vxi, data);
+	case VCMD_get_bcaps:
+		return vc_get_bcaps(vxi, data);
+
+	case VCMD_set_badness:
+		return vc_set_badness(vxi, data);
+	case VCMD_get_badness:
+		return vc_get_badness(vxi, data);
+
+	case VCMD_set_nflags:
+		return vc_set_nflags(nxi, data);
+	case VCMD_get_nflags:
+		return vc_get_nflags(nxi, data);
+
+	case VCMD_set_ncaps:
+		return vc_set_ncaps(nxi, data);
+	case VCMD_get_ncaps:
+		return vc_get_ncaps(nxi, data);
+
+	case VCMD_set_sched_v4:
+		return vc_set_sched_v4(vxi, data);
+	/* this is version 5 */
+	case VCMD_set_sched:
+		return vc_set_sched(vxi, data);
+	case VCMD_get_sched:
+		return vc_get_sched(vxi, data);
+	case VCMD_sched_info:
+		return vc_sched_info(vxi, data);
+
+	case VCMD_add_dlimit:
+		return __COMPAT(vc_add_dlimit, id, data, compat);
+	case VCMD_rem_dlimit:
+		return __COMPAT(vc_rem_dlimit, id, data, compat);
+	case VCMD_set_dlimit:
+		return __COMPAT(vc_set_dlimit, id, data, compat);
+	case VCMD_get_dlimit:
+		return __COMPAT(vc_get_dlimit, id, data, compat);
+
+	case VCMD_ctx_kill:
+		return vc_ctx_kill(vxi, data);
+
+	case VCMD_wait_exit:
+		return vc_wait_exit(vxi, data);
+
+	case VCMD_get_iattr:
+		return __COMPAT_NO_ID(vc_get_iattr, data, compat);
+	case VCMD_set_iattr:
+		return __COMPAT_NO_ID(vc_set_iattr, data, compat);
+
+	case VCMD_fget_iattr:
+		return vc_fget_iattr(id, data);
+	case VCMD_fset_iattr:
+		return vc_fset_iattr(id, data);
+
+	case VCMD_enter_space_v0:
+		return vc_enter_space_v1(vxi, NULL);
+	case VCMD_enter_space_v1:
+		return vc_enter_space_v1(vxi, data);
+	/* this is version 2 */
+	case VCMD_enter_space:
+		return vc_enter_space(vxi, data);
+
+	case VCMD_ctx_create_v0:
+		return vc_ctx_create(id, NULL);
+	case VCMD_ctx_create:
+		return vc_ctx_create(id, data);
+	case VCMD_ctx_migrate_v0:
+		return vc_ctx_migrate(vxi, NULL);
+	case VCMD_ctx_migrate:
+		return vc_ctx_migrate(vxi, data);
+
+	case VCMD_net_create_v0:
+		return vc_net_create(id, NULL);
+	case VCMD_net_create:
+		return vc_net_create(id, data);
+	case VCMD_net_migrate:
+		return vc_net_migrate(nxi, data);
+
+	case VCMD_tag_migrate:
+		return vc_tag_migrate(id);
+
+	case VCMD_net_add:
+		return vc_net_add(nxi, data);
+	case VCMD_net_remove:
+		return vc_net_remove(nxi, data);
+
+	case VCMD_net_add_ipv4:
+		return vc_net_add_ipv4(nxi, data);
+	case VCMD_net_remove_ipv4:
+		return vc_net_remove_ipv4(nxi, data);
+#ifdef	CONFIG_IPV6
+	case VCMD_net_add_ipv6:
+		return vc_net_add_ipv6(nxi, data);
+	case VCMD_net_remove_ipv6:
+		return vc_net_remove_ipv6(nxi, data);
+#endif
+/*	case VCMD_add_match_ipv4:
+		return vc_add_match_ipv4(nxi, data);
+	case VCMD_get_match_ipv4:
+		return vc_get_match_ipv4(nxi, data);
+#ifdef	CONFIG_IPV6
+	case VCMD_add_match_ipv6:
+		return vc_add_match_ipv6(nxi, data);
+	case VCMD_get_match_ipv6:
+		return vc_get_match_ipv6(nxi, data);
+#endif	*/
+
+#ifdef	CONFIG_VSERVER_DEVICE
+	case VCMD_set_mapping:
+		return __COMPAT(vc_set_mapping, vxi, data, compat);
+	case VCMD_unset_mapping:
+		return __COMPAT(vc_unset_mapping, vxi, data, compat);
+#endif
+#ifdef	CONFIG_VSERVER_HISTORY
+	case VCMD_dump_history:
+		return vc_dump_history(id);
+	case VCMD_read_history:
+		return __COMPAT(vc_read_history, id, data, compat);
+#endif
+#ifdef	CONFIG_VSERVER_MONITOR
+	case VCMD_read_monitor:
+		return __COMPAT(vc_read_monitor, id, data, compat);
+#endif
+	default:
+		vxwprintk_task(1, "unimplemented VCMD_%02d_%d[%d]",
+			VC_CATEGORY(cmd), VC_COMMAND(cmd), VC_VERSION(cmd));
+	}
+	return -ENOSYS;
+}
+
+
+#define	__VCMD(vcmd, _perm, _args, _flags)		\
+	case VCMD_ ## vcmd: perm = _perm;		\
+		args = _args; flags = _flags; break
+
+
+#define VCA_NONE	0x00
+#define VCA_VXI		0x01
+#define VCA_NXI		0x02
+
+#define VCF_NONE	0x00
+#define VCF_INFO	0x01
+#define VCF_ADMIN	0x02
+#define VCF_ARES	0x06	/* includes admin */
+#define VCF_SETUP	0x08
+
+#define VCF_ZIDOK	0x10	/* zero id okay */
+
+
+static inline
+long do_vserver(uint32_t cmd, uint32_t id, void __user *data, int compat)
+{
+	long ret;
+	int permit = -1, state = 0;
+	int perm = -1, args = 0, flags = 0;
+	struct vx_info *vxi = NULL;
+	struct nx_info *nxi = NULL;
+
+	switch (cmd) {
+	/* unprivileged commands */
+	__VCMD(get_version,	 0, VCA_NONE,	0);
+	__VCMD(get_vci,		 0, VCA_NONE,	0);
+	__VCMD(get_rlimit_mask,	 0, VCA_NONE,	0);
+	__VCMD(get_space_mask_v0,0, VCA_NONE,   0);
+	__VCMD(get_space_mask,	 0, VCA_NONE,   0);
+	__VCMD(get_space_default,0, VCA_NONE,   0);
+
+	/* info commands */
+	__VCMD(task_xid,	 2, VCA_NONE,	0);
+	__VCMD(reset_hits,	 2, VCA_VXI,	0);
+	__VCMD(reset_minmax,	 2, VCA_VXI,	0);
+	__VCMD(vx_info,		 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_bcaps,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_ccaps,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_cflags,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_badness,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_vhi_name,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_rlimit,	 3, VCA_VXI,	VCF_INFO);
+
+	__VCMD(ctx_stat,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(virt_stat,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(sock_stat,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(rlimit_stat,	 3, VCA_VXI,	VCF_INFO);
+
+	__VCMD(task_nid,	 2, VCA_NONE,	0);
+	__VCMD(nx_info,		 3, VCA_NXI,	VCF_INFO);
+	__VCMD(get_ncaps,	 3, VCA_NXI,	VCF_INFO);
+	__VCMD(get_nflags,	 3, VCA_NXI,	VCF_INFO);
+
+	__VCMD(task_tag,	 2, VCA_NONE,	0);
+
+	__VCMD(get_iattr,	 2, VCA_NONE,	0);
+	__VCMD(fget_iattr,	 2, VCA_NONE,	0);
+	__VCMD(get_dlimit,	 3, VCA_NONE,	VCF_INFO);
+	__VCMD(get_sched,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(sched_info,	 3, VCA_VXI,	VCF_INFO | VCF_ZIDOK);
+
+	/* lower admin commands */
+	__VCMD(wait_exit,	 4, VCA_VXI,	VCF_INFO);
+	__VCMD(ctx_create_v0,	 5, VCA_NONE,	0);
+	__VCMD(ctx_create,	 5, VCA_NONE,	0);
+	__VCMD(ctx_migrate_v0,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(ctx_migrate,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(enter_space_v0,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(enter_space_v1,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(enter_space,	 5, VCA_VXI,	VCF_ADMIN);
+
+	__VCMD(net_create_v0,	 5, VCA_NONE,	0);
+	__VCMD(net_create,	 5, VCA_NONE,	0);
+	__VCMD(net_migrate,	 5, VCA_NXI,	VCF_ADMIN);
+
+	__VCMD(tag_migrate,	 5, VCA_NONE,	VCF_ADMIN);
+
+	/* higher admin commands */
+	__VCMD(ctx_kill,	 6, VCA_VXI,	VCF_ARES);
+	__VCMD(set_space_v1,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_space,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+
+	__VCMD(set_ccaps,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_bcaps,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_cflags,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_badness,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+
+	__VCMD(set_vhi_name,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_rlimit,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_sched,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_sched_v4,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+
+	__VCMD(set_ncaps,	 7, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_nflags,	 7, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_add,		 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_remove,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_add_ipv4,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_remove_ipv4,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+#ifdef	CONFIG_IPV6
+	__VCMD(net_add_ipv6,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_remove_ipv6,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+#endif
+	__VCMD(set_iattr,	 7, VCA_NONE,	0);
+	__VCMD(fset_iattr,	 7, VCA_NONE,	0);
+	__VCMD(set_dlimit,	 7, VCA_NONE,	VCF_ARES);
+	__VCMD(add_dlimit,	 8, VCA_NONE,	VCF_ARES);
+	__VCMD(rem_dlimit,	 8, VCA_NONE,	VCF_ARES);
+
+#ifdef	CONFIG_VSERVER_DEVICE
+	__VCMD(set_mapping,	 8, VCA_VXI,    VCF_ARES|VCF_ZIDOK);
+	__VCMD(unset_mapping,	 8, VCA_VXI,	VCF_ARES|VCF_ZIDOK);
+#endif
+	/* debug level admin commands */
+#ifdef	CONFIG_VSERVER_HISTORY
+	__VCMD(dump_history,	 9, VCA_NONE,	0);
+	__VCMD(read_history,	 9, VCA_NONE,	0);
+#endif
+#ifdef	CONFIG_VSERVER_MONITOR
+	__VCMD(read_monitor,	 9, VCA_NONE,	0);
+#endif
+
+	default:
+		perm = -1;
+	}
+
+	vxdprintk(VXD_CBIT(switch, 0),
+		"vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]",
+		VC_CATEGORY(cmd), VC_COMMAND(cmd),
+		VC_VERSION(cmd), id, data, compat,
+		perm, args, flags);
+
+	ret = -ENOSYS;
+	if (perm < 0)
+		goto out;
+
+	state = 1;
+	if (!capable(CAP_CONTEXT))
+		goto out;
+
+	state = 2;
+	/* moved here from the individual commands */
+	ret = -EPERM;
+	if ((perm > 1) && !capable(CAP_SYS_ADMIN))
+		goto out;
+
+	state = 3;
+	/* vcmd involves resource management  */
+	ret = -EPERM;
+	if ((flags & VCF_ARES) && !capable(CAP_SYS_RESOURCE))
+		goto out;
+
+	state = 4;
+	/* various legacy exceptions */
+	switch (cmd) {
+	/* will go away when spectator is a cap */
+	case VCMD_ctx_migrate_v0:
+	case VCMD_ctx_migrate:
+		if (id == 1) {
+			current->xid = 1;
+			ret = 1;
+			goto out;
+		}
+		break;
+
+	/* will go away when spectator is a cap */
+	case VCMD_net_migrate:
+		if (id == 1) {
+			current->nid = 1;
+			ret = 1;
+			goto out;
+		}
+		break;
+	}
+
+	/* vcmds are fine by default */
+	permit = 1;
+
+	/* admin type vcmds require admin ... */
+	if (flags & VCF_ADMIN)
+		permit = vx_check(0, VS_ADMIN) ? 1 : 0;
+
+	/* ... but setup type vcmds override that */
+	if (!permit && (flags & VCF_SETUP))
+		permit = vx_flags(VXF_STATE_SETUP, 0) ? 2 : 0;
+
+	state = 5;
+	ret = -EPERM;
+	if (!permit)
+		goto out;
+
+	state = 6;
+	if (!id && (flags & VCF_ZIDOK))
+		goto skip_id;
+
+	ret = -ESRCH;
+	if (args & VCA_VXI) {
+		vxi = lookup_vx_info(id);
+		if (!vxi)
+			goto out;
+
+		if ((flags & VCF_ADMIN) &&
+			/* special case kill for shutdown */
+			(cmd != VCMD_ctx_kill) &&
+			/* can context be administered? */
+			!vx_info_flags(vxi, VXF_STATE_ADMIN, 0)) {
+			ret = -EACCES;
+			goto out_vxi;
+		}
+	}
+	state = 7;
+	if (args & VCA_NXI) {
+		nxi = lookup_nx_info(id);
+		if (!nxi)
+			goto out_vxi;
+
+		if ((flags & VCF_ADMIN) &&
+			/* can context be administered? */
+			!nx_info_flags(nxi, NXF_STATE_ADMIN, 0)) {
+			ret = -EACCES;
+			goto out_nxi;
+		}
+	}
+skip_id:
+	state = 8;
+	ret = do_vcmd(cmd, id, vxi, nxi, data, compat);
+
+out_nxi:
+	if ((args & VCA_NXI) && nxi)
+		put_nx_info(nxi);
+out_vxi:
+	if ((args & VCA_VXI) && vxi)
+		put_vx_info(vxi);
+out:
+	vxdprintk(VXD_CBIT(switch, 1),
+		"vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]",
+		VC_CATEGORY(cmd), VC_COMMAND(cmd),
+		VC_VERSION(cmd), ret, ret, state, permit);
+	return ret;
+}
+
+asmlinkage long
+sys_vserver(uint32_t cmd, uint32_t id, void __user *data)
+{
+	return do_vserver(cmd, id, data, 0);
+}
+
+#ifdef	CONFIG_COMPAT
+
+asmlinkage long
+sys32_vserver(uint32_t cmd, uint32_t id, void __user *data)
+{
+	return do_vserver(cmd, id, data, 1);
+}
+
+#endif	/* CONFIG_COMPAT */
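The dispatcher above encodes a fixed policy: each VCMD is assigned a permission level and a set of VCF_* flags via the __VCMD table, which do_vserver() checks once before handing off to the per-command handler in do_vcmd(). A minimal userspace sketch of calling into it, assuming a patched kernel and its headers; __NR_vserver is arch-specific, and the VCMD_* encodings are assumed to live in the patch's <linux/vserver/switch.h>:

/* Illustrative userspace sketch, not part of the patch: query the
 * version handled by VCMD_get_version in the switch above.
 * __NR_vserver comes from <sys/syscall.h> on a patched system and
 * differs per architecture; the header path below is an assumption. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/vserver/switch.h>	/* assumed location of the VCMD_* encodings */

int main(void)
{
	long ver = syscall(__NR_vserver, VCMD_get_version, 0, NULL);

	if (ver < 0) {
		perror("vserver(VCMD_get_version)");
		return 1;
	}
	printf("vserver API version: 0x%08lx\n", ver);
	return 0;
}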
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/sysctl.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sysctl.c
--- linux-2.6.32.17/kernel/vserver/sysctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/sysctl.c	2010-01-13 15:21:01.000000000 +0100
@@ -0,0 +1,245 @@
+/*
+ *  kernel/vserver/sysctl.c
+ *
+ *  Virtual Context Support
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/parser.h>
+#include <asm/uaccess.h>
+
+
+enum {
+	CTL_DEBUG_ERROR		= 0,
+	CTL_DEBUG_SWITCH	= 1,
+	CTL_DEBUG_XID,
+	CTL_DEBUG_NID,
+	CTL_DEBUG_TAG,
+	CTL_DEBUG_NET,
+	CTL_DEBUG_LIMIT,
+	CTL_DEBUG_CRES,
+	CTL_DEBUG_DLIM,
+	CTL_DEBUG_QUOTA,
+	CTL_DEBUG_CVIRT,
+	CTL_DEBUG_SPACE,
+	CTL_DEBUG_MISC,
+};
+
+
+unsigned int vx_debug_switch	= 0;
+unsigned int vx_debug_xid	= 0;
+unsigned int vx_debug_nid	= 0;
+unsigned int vx_debug_tag	= 0;
+unsigned int vx_debug_net	= 0;
+unsigned int vx_debug_limit	= 0;
+unsigned int vx_debug_cres	= 0;
+unsigned int vx_debug_dlim	= 0;
+unsigned int vx_debug_quota	= 0;
+unsigned int vx_debug_cvirt	= 0;
+unsigned int vx_debug_space	= 0;
+unsigned int vx_debug_misc	= 0;
+
+
+static struct ctl_table_header *vserver_table_header;
+static ctl_table vserver_root_table[];
+
+
+void vserver_register_sysctl(void)
+{
+	if (!vserver_table_header) {
+		vserver_table_header = register_sysctl_table(vserver_root_table);
+	}
+
+}
+
+void vserver_unregister_sysctl(void)
+{
+	if (vserver_table_header) {
+		unregister_sysctl_table(vserver_table_header);
+		vserver_table_header = NULL;
+	}
+}
+
+
+static int proc_dodebug(ctl_table *table, int write,
+	void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	char		tmpbuf[20], *p, c;
+	unsigned int	value;
+	size_t		left, len;
+
+	if ((*ppos && !write) || !*lenp) {
+		*lenp = 0;
+		return 0;
+	}
+
+	left = *lenp;
+
+	if (write) {
+		if (!access_ok(VERIFY_READ, buffer, left))
+			return -EFAULT;
+		p = (char *)buffer;
+		while (left && __get_user(c, p) >= 0 && isspace(c))
+			left--, p++;
+		if (!left)
+			goto done;
+
+		if (left > sizeof(tmpbuf) - 1)
+			return -EINVAL;
+		if (copy_from_user(tmpbuf, p, left))
+			return -EFAULT;
+		tmpbuf[left] = '\0';
+
+		for (p = tmpbuf, value = 0; '0' <= *p && *p <= '9'; p++, left--)
+			value = 10 * value + (*p - '0');
+		if (*p && !isspace(*p))
+			return -EINVAL;
+		while (left && isspace(*p))
+			left--, p++;
+		*(unsigned int *)table->data = value;
+	} else {
+		if (!access_ok(VERIFY_WRITE, buffer, left))
+			return -EFAULT;
+		len = sprintf(tmpbuf, "%d", *(unsigned int *)table->data);
+		if (len > left)
+			len = left;
+		if (__copy_to_user(buffer, tmpbuf, len))
+			return -EFAULT;
+		if ((left -= len) > 0) {
+			if (put_user('\n', (char *)buffer + len))
+				return -EFAULT;
+			left--;
+		}
+	}
+
+done:
+	*lenp -= left;
+	*ppos += *lenp;
+	return 0;
+}
+
+static int zero;
+
+#define	CTL_ENTRY(ctl, name)				\
+	{						\
+		.ctl_name	= ctl,			\
+		.procname	= #name,		\
+		.data		= &vx_ ## name,		\
+		.maxlen		= sizeof(int),		\
+		.mode		= 0644,			\
+		.proc_handler	= &proc_dodebug,	\
+		.strategy	= &sysctl_intvec,	\
+		.extra1		= &zero,		\
+		.extra2		= &zero,		\
+	}
+
+static ctl_table vserver_debug_table[] = {
+	CTL_ENTRY(CTL_DEBUG_SWITCH,	debug_switch),
+	CTL_ENTRY(CTL_DEBUG_XID,	debug_xid),
+	CTL_ENTRY(CTL_DEBUG_NID,	debug_nid),
+	CTL_ENTRY(CTL_DEBUG_TAG,	debug_tag),
+	CTL_ENTRY(CTL_DEBUG_NET,	debug_net),
+	CTL_ENTRY(CTL_DEBUG_LIMIT,	debug_limit),
+	CTL_ENTRY(CTL_DEBUG_CRES,	debug_cres),
+	CTL_ENTRY(CTL_DEBUG_DLIM,	debug_dlim),
+	CTL_ENTRY(CTL_DEBUG_QUOTA,	debug_quota),
+	CTL_ENTRY(CTL_DEBUG_CVIRT,	debug_cvirt),
+	CTL_ENTRY(CTL_DEBUG_SPACE,	debug_space),
+	CTL_ENTRY(CTL_DEBUG_MISC,	debug_misc),
+	{ .ctl_name = 0 }
+};
+
+static ctl_table vserver_root_table[] = {
+	{
+		.ctl_name	= CTL_VSERVER,
+		.procname	= "vserver",
+		.mode		= 0555,
+		.child		= vserver_debug_table
+	},
+	{ .ctl_name = 0 }
+};
+
+
+static match_table_t tokens = {
+	{ CTL_DEBUG_SWITCH,	"switch=%x"	},
+	{ CTL_DEBUG_XID,	"xid=%x"	},
+	{ CTL_DEBUG_NID,	"nid=%x"	},
+	{ CTL_DEBUG_TAG,	"tag=%x"	},
+	{ CTL_DEBUG_NET,	"net=%x"	},
+	{ CTL_DEBUG_LIMIT,	"limit=%x"	},
+	{ CTL_DEBUG_CRES,	"cres=%x"	},
+	{ CTL_DEBUG_DLIM,	"dlim=%x"	},
+	{ CTL_DEBUG_QUOTA,	"quota=%x"	},
+	{ CTL_DEBUG_CVIRT,	"cvirt=%x"	},
+	{ CTL_DEBUG_SPACE,	"space=%x"	},
+	{ CTL_DEBUG_MISC,	"misc=%x"	},
+	{ CTL_DEBUG_ERROR,	NULL		}
+};
+
+#define	HANDLE_CASE(id, name, val)				\
+	case CTL_DEBUG_ ## id:					\
+		vx_debug_ ## name = val;			\
+		printk("vs_debug_" #name "=0x%x\n", val);	\
+		break
+
+
+static int __init vs_debug_setup(char *str)
+{
+	char *p;
+	int token;
+
+	printk("vs_debug_setup(%s)\n", str);
+	while ((p = strsep(&str, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		unsigned int value;
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		value = (token > 0) ? simple_strtoul(args[0].from, NULL, 0) : 0;
+
+		switch (token) {
+		HANDLE_CASE(SWITCH, switch, value);
+		HANDLE_CASE(XID,    xid,    value);
+		HANDLE_CASE(NID,    nid,    value);
+		HANDLE_CASE(TAG,    tag,    value);
+		HANDLE_CASE(NET,    net,    value);
+		HANDLE_CASE(LIMIT,  limit,  value);
+		HANDLE_CASE(CRES,   cres,   value);
+		HANDLE_CASE(DLIM,   dlim,   value);
+		HANDLE_CASE(QUOTA,  quota,  value);
+		HANDLE_CASE(CVIRT,  cvirt,  value);
+		HANDLE_CASE(SPACE,  space,  value);
+		HANDLE_CASE(MISC,   misc,   value);
+		default:
+			return -EINVAL;
+			break;
+		}
+	}
+	return 1;
+}
+
+__setup("vsdebug=", vs_debug_setup);
+
+
+
+EXPORT_SYMBOL_GPL(vx_debug_switch);
+EXPORT_SYMBOL_GPL(vx_debug_xid);
+EXPORT_SYMBOL_GPL(vx_debug_nid);
+EXPORT_SYMBOL_GPL(vx_debug_net);
+EXPORT_SYMBOL_GPL(vx_debug_limit);
+EXPORT_SYMBOL_GPL(vx_debug_cres);
+EXPORT_SYMBOL_GPL(vx_debug_dlim);
+EXPORT_SYMBOL_GPL(vx_debug_quota);
+EXPORT_SYMBOL_GPL(vx_debug_cvirt);
+EXPORT_SYMBOL_GPL(vx_debug_space);
+EXPORT_SYMBOL_GPL(vx_debug_misc);
+
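The tables above expose the vx_debug_* masks as /proc/sys/vserver/debug_* entries (names inferred from the ctl_table definitions), and vs_debug_setup() accepts the same masks at boot time through a parameter such as vsdebug=switch=1,xid=3. A short sketch of flipping one mask at runtime, assuming the inferred proc path:

/* Illustrative sketch, not part of the patch: enable the "switch"
 * debug mask through the sysctl registered above.  proc_dodebug()
 * parses a plain decimal value. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vserver/debug_switch", "w");

	if (!f) {
		perror("debug_switch");
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}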
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/tag.c linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/tag.c
--- linux-2.6.32.17/kernel/vserver/tag.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/tag.c	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,63 @@
+/*
+ *  linux/kernel/vserver/tag.c
+ *
+ *  Virtual Server: Shallow Tag Space
+ *
+ *  Copyright (C) 2007  Herbert Pötzl
+ *
+ *  V0.01  basic implementation
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/vserver/debug.h>
+#include <linux/vs_pid.h>
+#include <linux/vs_tag.h>
+
+#include <linux/vserver/tag_cmd.h>
+
+
+int dx_migrate_task(struct task_struct *p, tag_t tag)
+{
+	if (!p)
+		BUG();
+
+	vxdprintk(VXD_CBIT(tag, 5),
+		"dx_migrate_task(%p[#%d],#%d)", p, p->tag, tag);
+
+	task_lock(p);
+	p->tag = tag;
+	task_unlock(p);
+
+	vxdprintk(VXD_CBIT(tag, 5),
+		"moved task %p into [#%d]", p, tag);
+	return 0;
+}
+
+/* vserver syscall commands below here */
+
+/* task xid and vx_info functions */
+
+
+int vc_task_tag(uint32_t id)
+{
+	tag_t tag;
+
+	if (id) {
+		struct task_struct *tsk;
+		read_lock(&tasklist_lock);
+		tsk = find_task_by_real_pid(id);
+		tag = (tsk) ? tsk->tag : -ESRCH;
+		read_unlock(&tasklist_lock);
+	} else
+		tag = dx_current_tag();
+	return tag;
+}
+
+
+int vc_tag_migrate(uint32_t tag)
+{
+	return dx_migrate_task(current, tag & 0xFFFF);
+}
+
+
diff -NurpP --minimal linux-2.6.32.17/kernel/vserver/vci_config.h linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/vci_config.h
--- linux-2.6.32.17/kernel/vserver/vci_config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/kernel/vserver/vci_config.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,81 @@
+
+/*  interface version */
+
+#define VCI_VERSION		0x00020305
+
+
+enum {
+	VCI_KCBIT_NO_DYNAMIC = 0,
+
+	VCI_KCBIT_PROC_SECURE = 4,
+	VCI_KCBIT_HARDCPU = 5,
+	VCI_KCBIT_IDLELIMIT = 6,
+	VCI_KCBIT_IDLETIME = 7,
+
+	VCI_KCBIT_COWBL = 8,
+	VCI_KCBIT_FULLCOWBL = 9,
+	VCI_KCBIT_SPACES = 10,
+	VCI_KCBIT_NETV2 = 11,
+
+	VCI_KCBIT_DEBUG = 16,
+	VCI_KCBIT_HISTORY = 20,
+	VCI_KCBIT_TAGGED = 24,
+	VCI_KCBIT_PPTAG = 28,
+
+	VCI_KCBIT_MORE = 31,
+};
+
+
+static inline uint32_t vci_kernel_config(void)
+{
+	return
+	(1 << VCI_KCBIT_NO_DYNAMIC) |
+
+	/* configured features */
+#ifdef	CONFIG_VSERVER_PROC_SECURE
+	(1 << VCI_KCBIT_PROC_SECURE) |
+#endif
+#ifdef	CONFIG_VSERVER_HARDCPU
+	(1 << VCI_KCBIT_HARDCPU) |
+#endif
+#ifdef	CONFIG_VSERVER_IDLELIMIT
+	(1 << VCI_KCBIT_IDLELIMIT) |
+#endif
+#ifdef	CONFIG_VSERVER_IDLETIME
+	(1 << VCI_KCBIT_IDLETIME) |
+#endif
+#ifdef	CONFIG_VSERVER_COWBL
+	(1 << VCI_KCBIT_COWBL) |
+	(1 << VCI_KCBIT_FULLCOWBL) |
+#endif
+	(1 << VCI_KCBIT_SPACES) |
+	(1 << VCI_KCBIT_NETV2) |
+
+	/* debug options */
+#ifdef	CONFIG_VSERVER_DEBUG
+	(1 << VCI_KCBIT_DEBUG) |
+#endif
+#ifdef	CONFIG_VSERVER_HISTORY
+	(1 << VCI_KCBIT_HISTORY) |
+#endif
+
+	/* inode context tagging */
+#if	defined(CONFIG_TAGGING_NONE)
+	(0 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_UID16)
+	(1 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_GID16)
+	(2 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_ID24)
+	(3 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_INTERN)
+	(4 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_RUNTIME)
+	(5 << VCI_KCBIT_TAGGED) |
+#else
+	(7 << VCI_KCBIT_TAGGED) |
+#endif
+	(1 << VCI_KCBIT_PPTAG) |
+	0;
+}
+
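vci_kernel_config() packs the compile-time feature set into a single word that userspace retrieves through VCMD_get_vci and compares against the VCI_VERSION it was built for. An illustrative decoder for that layout, with bit positions taken from the VCI_KCBIT_* enum above:

/* Illustrative decoder, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

static void dump_vci_config(uint32_t cfg)
{
	static const char *tagging[] = {
		"none", "uid16", "gid16", "id24", "intern", "runtime", "unused", "unknown"
	};

	printf("spaces:  %s\n", (cfg >> 10) & 1 ? "yes" : "no");	/* VCI_KCBIT_SPACES */
	printf("netv2:   %s\n", (cfg >> 11) & 1 ? "yes" : "no");	/* VCI_KCBIT_NETV2 */
	printf("debug:   %s\n", (cfg >> 16) & 1 ? "yes" : "no");	/* VCI_KCBIT_DEBUG */
	printf("tagging: %s\n", tagging[(cfg >> 24) & 7]);		/* VCI_KCBIT_TAGGED */
}

int main(void)
{
	/* example value: no_dynamic, spaces, netv2, intern tagging, pptag */
	dump_vci_config((1 << 0) | (1 << 10) | (1 << 11) | (4 << 24) | (1 << 28));
	return 0;
}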
diff -NurpP --minimal linux-2.6.32.17/Makefile linux-2.6.32.17-vs2.3.0.36.29.4/Makefile
--- linux-2.6.32.17/Makefile	2010-08-09 18:04:07.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/Makefile	2010-08-09 18:25:13.000000000 +0200
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = .17
+EXTRAVERSION = .17-vs2.3.0.36.29.4
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff -NurpP --minimal linux-2.6.32.17/mm/allocpercpu.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/allocpercpu.c
--- linux-2.6.32.17/mm/allocpercpu.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/allocpercpu.c	2009-12-03 20:04:56.000000000 +0100
@@ -160,12 +160,14 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 void __init setup_per_cpu_areas(void)
 {
-	unsigned long size, i;
+	unsigned long size, vspc, i;
 	char *ptr;
 	unsigned long nr_possible_cpus = num_possible_cpus();
 
+	vspc = PERCPU_PERCTX * CONFIG_VSERVER_CONTEXTS;
+
 	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+	size = ALIGN(PERCPU_ENOUGH_ROOM + vspc, PAGE_SIZE);
 	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
 
 	for_each_possible_cpu(i) {
diff -NurpP --minimal linux-2.6.32.17/mm/filemap_xip.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/filemap_xip.c
--- linux-2.6.32.17/mm/filemap_xip.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/filemap_xip.c	2009-12-03 20:04:56.000000000 +0100
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <linux/seqlock.h>
 #include <linux/mutex.h>
+#include <linux/vs_memory.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
 
diff -NurpP --minimal linux-2.6.32.17/mm/fremap.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/fremap.c
--- linux-2.6.32.17/mm/fremap.c	2009-03-24 14:22:45.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/fremap.c	2009-12-03 20:04:56.000000000 +0100
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/syscalls.h>
 #include <linux/mmu_notifier.h>
+#include <linux/vs_memory.h>
 
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
diff -NurpP --minimal linux-2.6.32.17/mm/hugetlb.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/hugetlb.c
--- linux-2.6.32.17/mm/hugetlb.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/hugetlb.c	2010-07-08 02:22:31.000000000 +0200
@@ -24,6 +24,7 @@
 #include <asm/io.h>
 
 #include <linux/hugetlb.h>
+#include <linux/vs_memory.h>
 #include "internal.h"
 
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
diff -NurpP --minimal linux-2.6.32.17/mm/memcontrol.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/memcontrol.c
--- linux-2.6.32.17/mm/memcontrol.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/memcontrol.c	2010-05-21 13:20:19.000000000 +0200
@@ -549,6 +549,31 @@ struct mem_cgroup *mem_cgroup_from_task(
 				struct mem_cgroup, css);
 }
 
+u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member)
+{
+	return res_counter_read_u64(&mem->res, member);
+}
+
+u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member)
+{
+	return res_counter_read_u64(&mem->memsw, member);
+}
+
+s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem)
+{
+	return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
+}
+
+s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem)
+{
+	return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+}
+
+s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem)
+{
+	return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
+}
+
 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
 	struct mem_cgroup *mem = NULL;
diff -NurpP --minimal linux-2.6.32.17/mm/memory.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/memory.c
--- linux-2.6.32.17/mm/memory.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/memory.c	2009-12-30 00:58:19.000000000 +0100
@@ -56,6 +56,7 @@
 #include <linux/kallsyms.h>
 #include <linux/swapops.h>
 #include <linux/elf.h>
+// #include <linux/vs_memory.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -647,6 +648,9 @@ static int copy_pte_range(struct mm_stru
 	int progress = 0;
 	int rss[2];
 
+	if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1)))
+		return -ENOMEM;
+
 again:
 	rss[1] = rss[0] = 0;
 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
@@ -2645,6 +2649,8 @@ static int do_anonymous_page(struct mm_s
 	/* Allocate our own private page. */
 	pte_unmap(page_table);
 
+	if (!vx_rss_avail(mm, 1))
+		goto oom;
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	page = alloc_zeroed_user_highpage_movable(vma, address);
@@ -2936,6 +2942,7 @@ static inline int handle_pte_fault(struc
 {
 	pte_t entry;
 	spinlock_t *ptl;
+	int ret = 0, type = VXPT_UNKNOWN;
 
 	entry = *pte;
 	if (!pte_present(entry)) {
@@ -2960,9 +2967,12 @@ static inline int handle_pte_fault(struc
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
 	if (flags & FAULT_FLAG_WRITE) {
-		if (!pte_write(entry))
-			return do_wp_page(mm, vma, address,
+		if (!pte_write(entry)) {
+			ret = do_wp_page(mm, vma, address,
 					pte, pmd, ptl, entry);
+			type = VXPT_WRITE;
+			goto out;
+		}
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
@@ -2980,7 +2990,10 @@ static inline int handle_pte_fault(struc
 	}
 unlock:
 	pte_unmap_unlock(pte, ptl);
-	return 0;
+	ret = 0;
+out:
+	vx_page_fault(mm, vma, type, ret);
+	return ret;
 }
 
 /*
diff -NurpP --minimal linux-2.6.32.17/mm/mlock.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/mlock.c
--- linux-2.6.32.17/mm/mlock.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/mlock.c	2010-01-13 14:33:47.000000000 +0100
@@ -18,6 +18,7 @@
 #include <linux/rmap.h>
 #include <linux/mmzone.h>
 #include <linux/hugetlb.h>
+#include <linux/vs_memory.h>
 
 #include "internal.h"
 
@@ -401,7 +402,7 @@ success:
 	nr_pages = (end - start) >> PAGE_SHIFT;
 	if (!lock)
 		nr_pages = -nr_pages;
-	mm->locked_vm += nr_pages;
+	vx_vmlocked_add(mm, nr_pages);
 
 	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
@@ -474,7 +475,7 @@ static int do_mlock(unsigned long start,
 
 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
-	unsigned long locked;
+	unsigned long locked, grow;
 	unsigned long lock_limit;
 	int error = -ENOMEM;
 
@@ -487,8 +488,10 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
 	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
 	start &= PAGE_MASK;
 
-	locked = len >> PAGE_SHIFT;
-	locked += current->mm->locked_vm;
+	grow = len >> PAGE_SHIFT;
+	if (!vx_vmlocked_avail(current->mm, grow))
+		goto out;
+	locked = current->mm->locked_vm + grow;
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 	lock_limit >>= PAGE_SHIFT;
@@ -496,6 +499,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
 	/* check against resource limits */
 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
 		error = do_mlock(start, len, 1);
+out:
 	up_write(&current->mm->mmap_sem);
 	return error;
 }
@@ -557,6 +561,8 @@ SYSCALL_DEFINE1(mlockall, int, flags)
 	lock_limit >>= PAGE_SHIFT;
 
 	ret = -ENOMEM;
+	if (!vx_vmlocked_avail(current->mm, current->mm->total_vm))
+		goto out;
 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
 	    capable(CAP_IPC_LOCK))
 		ret = do_mlockall(flags);
@@ -631,8 +637,10 @@ int account_locked_memory(struct mm_stru
 	if (lim < vm)
 		goto out;
 
-	mm->total_vm  += pgsz;
-	mm->locked_vm += pgsz;
+	// mm->total_vm  += pgsz;
+	vx_vmpages_add(mm, pgsz);
+	// mm->locked_vm += pgsz;
+	vx_vmlocked_add(mm, pgsz);
 
 	error = 0;
  out:
@@ -646,8 +654,10 @@ void refund_locked_memory(struct mm_stru
 
 	down_write(&mm->mmap_sem);
 
-	mm->total_vm  -= pgsz;
-	mm->locked_vm -= pgsz;
+	// mm->total_vm  -= pgsz;
+	vx_vmpages_sub(mm, pgsz);
+	// mm->locked_vm -= pgsz;
+	vx_vmlocked_sub(mm, pgsz);
 
 	up_write(&mm->mmap_sem);
 }
diff -NurpP --minimal linux-2.6.32.17/mm/mmap.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/mmap.c
--- linux-2.6.32.17/mm/mmap.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/mmap.c	2010-01-20 04:21:33.000000000 +0100
@@ -1214,7 +1214,8 @@ munmap_back:
 out:
 	perf_event_mmap(vma);
 
-	mm->total_vm += len >> PAGE_SHIFT;
+	// mm->total_vm += len >> PAGE_SHIFT;
+	vx_vmpages_add(mm, len >> PAGE_SHIFT);
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		/*
@@ -1223,7 +1224,8 @@ out:
 		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
 		if (nr_pages < 0)
 			return nr_pages;	/* vma gone! */
-		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+		// mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+		vx_vmlocked_add(mm, (len >> PAGE_SHIFT) - nr_pages);
 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 	return addr;
@@ -1578,9 +1580,9 @@ static int acct_stack_growth(struct vm_a
 		return -ENOMEM;
 
 	/* Ok, everything looks good - let it rip */
-	mm->total_vm += grow;
+	vx_vmpages_add(mm, grow);
 	if (vma->vm_flags & VM_LOCKED)
-		mm->locked_vm += grow;
+		vx_vmlocked_add(mm, grow);
 	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
 	return 0;
 }
@@ -1755,7 +1757,8 @@ static void remove_vma_list(struct mm_st
 	do {
 		long nrpages = vma_pages(vma);
 
-		mm->total_vm -= nrpages;
+		// mm->total_vm -= nrpages;
+		vx_vmpages_sub(mm, nrpages);
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
@@ -1927,7 +1930,8 @@ int do_munmap(struct mm_struct *mm, unsi
 		struct vm_area_struct *tmp = vma;
 		while (tmp && tmp->vm_start < end) {
 			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
+				// mm->locked_vm -= vma_pages(tmp);
+				vx_vmlocked_sub(mm, vma_pages(tmp));
 				munlock_vma_pages_all(tmp);
 			}
 			tmp = tmp->vm_next;
@@ -2010,6 +2014,8 @@ unsigned long do_brk(unsigned long addr,
 		lock_limit >>= PAGE_SHIFT;
 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 			return -EAGAIN;
+		if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
+			return -ENOMEM;
 	}
 
 	/*
@@ -2036,7 +2042,8 @@ unsigned long do_brk(unsigned long addr,
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
-	if (security_vm_enough_memory(len >> PAGE_SHIFT))
+	if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
+		!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
 	/* Can we just expand an old private anonymous mapping? */
@@ -2062,10 +2069,13 @@ unsigned long do_brk(unsigned long addr,
 	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
-	mm->total_vm += len >> PAGE_SHIFT;
+	// mm->total_vm += len >> PAGE_SHIFT;
+	vx_vmpages_add(mm, len >> PAGE_SHIFT);
+
 	if (flags & VM_LOCKED) {
 		if (!mlock_vma_pages_range(vma, addr, addr + len))
-			mm->locked_vm += (len >> PAGE_SHIFT);
+			// mm->locked_vm += (len >> PAGE_SHIFT);
+			vx_vmlocked_add(mm, len >> PAGE_SHIFT);
 	}
 	return addr;
 }
@@ -2109,6 +2119,11 @@ void exit_mmap(struct mm_struct *mm)
 	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
+	set_mm_counter(mm, file_rss, 0);
+	set_mm_counter(mm, anon_rss, 0);
+	vx_vmpages_sub(mm, mm->total_vm);
+	vx_vmlocked_sub(mm, mm->locked_vm);
+
 	/*
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
@@ -2148,7 +2163,8 @@ int insert_vm_struct(struct mm_struct * 
 	if (__vma && __vma->vm_start < vma->vm_end)
 		return -ENOMEM;
 	if ((vma->vm_flags & VM_ACCOUNT) &&
-	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
+		(security_vm_enough_memory_mm(mm, vma_pages(vma)) ||
+		!vx_vmpages_avail(mm, vma_pages(vma))))
 		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	return 0;
@@ -2224,6 +2240,8 @@ int may_expand_vm(struct mm_struct *mm, 
 
 	if (cur + npages > lim)
 		return 0;
+	if (!vx_vmpages_avail(mm, npages))
+		return 0;
 	return 1;
 }
 
@@ -2301,7 +2319,7 @@ int install_special_mapping(struct mm_st
 		return -ENOMEM;
 	}
 
-	mm->total_vm += len >> PAGE_SHIFT;
+	vx_vmpages_add(mm, len >> PAGE_SHIFT);
 
 	perf_event_mmap(vma);
 
diff -NurpP --minimal linux-2.6.32.17/mm/mremap.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/mremap.c
--- linux-2.6.32.17/mm/mremap.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/mremap.c	2010-01-20 04:28:22.000000000 +0100
@@ -20,6 +20,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/mmu_notifier.h>
+#include <linux/vs_memory.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -234,7 +235,7 @@ static unsigned long move_vma(struct vm_
 	 * If this were a serious issue, we'd add a flag to do_munmap().
 	 */
 	hiwater_vm = mm->hiwater_vm;
-	mm->total_vm += new_len >> PAGE_SHIFT;
+	vx_vmpages_add(mm, new_len >> PAGE_SHIFT);
 	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
 
 	if (do_munmap(mm, old_addr, old_len) < 0) {
@@ -252,7 +253,7 @@ static unsigned long move_vma(struct vm_
 	}
 
 	if (vm_flags & VM_LOCKED) {
-		mm->locked_vm += new_len >> PAGE_SHIFT;
+		vx_vmlocked_add(mm, new_len >> PAGE_SHIFT);
 		if (new_len > old_len)
 			mlock_vma_pages_range(new_vma, new_addr + old_len,
 						       new_addr + new_len);
@@ -463,10 +464,12 @@ unsigned long do_mremap(unsigned long ad
 			vma_adjust(vma, vma->vm_start,
 				addr + new_len, vma->vm_pgoff, NULL);
 
-			mm->total_vm += pages;
+			// mm->total_vm += pages;
+			vx_vmpages_add(mm, pages);
 			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
 			if (vma->vm_flags & VM_LOCKED) {
-				mm->locked_vm += pages;
+				// mm->locked_vm += pages;
+				vx_vmlocked_add(mm, pages);
 				mlock_vma_pages_range(vma, addr + old_len,
 						   addr + new_len);
 			}
diff -NurpP --minimal linux-2.6.32.17/mm/nommu.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/nommu.c
--- linux-2.6.32.17/mm/nommu.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/nommu.c	2009-12-03 20:04:56.000000000 +0100
@@ -1346,7 +1346,7 @@ unsigned long do_mmap_pgoff(struct file 
 	/* okay... we have a mapping; now we have to register it */
 	result = vma->vm_start;
 
-	current->mm->total_vm += len >> PAGE_SHIFT;
+	vx_vmpages_add(current->mm, len >> PAGE_SHIFT);
 
 share:
 	add_vma_to_mm(current->mm, vma);
@@ -1606,7 +1606,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	kenter("");
 
-	mm->total_vm = 0;
+	vx_vmpages_sub(mm, mm->total_vm);
 
 	while ((vma = mm->mmap)) {
 		mm->mmap = vma->vm_next;
diff -NurpP --minimal linux-2.6.32.17/mm/oom_kill.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/oom_kill.c
--- linux-2.6.32.17/mm/oom_kill.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/oom_kill.c	2010-03-18 16:53:06.000000000 +0100
@@ -27,6 +27,9 @@
 #include <linux/notifier.h>
 #include <linux/memcontrol.h>
 #include <linux/security.h>
+#include <linux/reboot.h>
+#include <linux/vs_memory.h>
+#include <linux/vs_context.h>
 
 int sysctl_panic_on_oom;
 int sysctl_oom_kill_allocating_task;
@@ -186,9 +189,21 @@ unsigned long badness(struct task_struct
 			points >>= -(oom_adj);
 	}
 
+	/*
+	 * add points for context badness and
+	 * reduce badness for processes belonging to
+	 * a different context
+	 */
+
+	points += vx_badness(p, mm);
+
+	if ((vx_current_xid() > 1) &&
+		vx_current_xid() != vx_task_xid(p))
+		points /= 16;
+
 #ifdef DEBUG
-	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
-	p->pid, p->comm, points);
+	printk(KERN_DEBUG "OOMkill: task %d:#%u (%s) got %d points\n",
+		task_pid_nr(p), p->xid, p->comm, points);
 #endif
 	return points;
 }
@@ -230,6 +245,7 @@ static struct task_struct *select_bad_pr
 	struct task_struct *p;
 	struct task_struct *chosen = NULL;
 	struct timespec uptime;
+	unsigned xid = vx_current_xid();
 	*ppoints = 0;
 
 	do_posix_clock_monotonic_gettime(&uptime);
@@ -242,11 +258,14 @@ static struct task_struct *select_bad_pr
 		 */
 		if (!p->mm)
 			continue;
-		/* skip the init task */
-		if (is_global_init(p))
+		/* skip the init task, global and per guest */
+		if (task_is_init(p))
 			continue;
 		if (mem && !task_in_mem_cgroup(p, mem))
 			continue;
+		/* skip other guest and host processes if oom in guest */
+		if (xid && vx_task_xid(p) != xid)
+			continue;
 
 		/*
 		 * This task already has access to memory reserves and is
@@ -357,8 +376,8 @@ static void __oom_kill_task(struct task_
 	}
 
 	if (verbose)
-		printk(KERN_ERR "Killed process %d (%s)\n",
-				task_pid_nr(p), p->comm);
+		printk(KERN_ERR "Killed process %s(%d:#%u)\n",
+			p->comm, task_pid_nr(p), p->xid);
 
 	/*
 	 * We give our sacrificial lamb high priority and access to
@@ -419,8 +438,8 @@ static int oom_kill_process(struct task_
 		return 0;
 	}
 
-	printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
-					message, task_pid_nr(p), p->comm, points);
+	printk(KERN_ERR "%s: kill process %s(%d:#%u) score %li or a child\n",
+		message, p->comm, task_pid_nr(p), p->xid, points);
 
 	/* Try to kill a child first */
 	list_for_each_entry(c, &p->children, sibling) {
@@ -521,6 +540,8 @@ void clear_zonelist_oom(struct zonelist 
 	spin_unlock(&zone_scan_lock);
 }
 
+long vs_oom_action(unsigned int);
+
 /*
  * Must be called with tasklist_lock held for read.
  */
@@ -546,7 +567,11 @@ retry:
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		panic("Out of memory and no killable processes...\n");
+		/* avoid panic for guest OOM */
+		if (current->xid)
+			vs_oom_action(LINUX_REBOOT_CMD_OOM);
+		else
+			panic("Out of memory and no killable processes...\n");
 	}
 
 	if (oom_kill_process(p, gfp_mask, order, points, NULL,
diff -NurpP --minimal linux-2.6.32.17/mm/page_alloc.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/page_alloc.c
--- linux-2.6.32.17/mm/page_alloc.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/page_alloc.c	2010-02-12 10:59:55.000000000 +0100
@@ -48,6 +48,8 @@
 #include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
 #include <trace/events/kmem.h>
 
 #include <asm/tlbflush.h>
@@ -2131,6 +2133,9 @@ void si_meminfo(struct sysinfo *val)
 	val->totalhigh = totalhigh_pages;
 	val->freehigh = nr_free_highpages();
 	val->mem_unit = PAGE_SIZE;
+
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_meminfo(val);
 }
 
 EXPORT_SYMBOL(si_meminfo);
@@ -2151,6 +2156,9 @@ void si_meminfo_node(struct sysinfo *val
 	val->freehigh = 0;
 #endif
 	val->mem_unit = PAGE_SIZE;
+
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_meminfo(val);
 }
 #endif
 
diff -NurpP --minimal linux-2.6.32.17/mm/rmap.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/rmap.c
--- linux-2.6.32.17/mm/rmap.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/rmap.c	2009-12-03 20:04:56.000000000 +0100
@@ -55,6 +55,7 @@
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
+#include <linux/vs_memory.h>
 
 #include <asm/tlbflush.h>
 
diff -NurpP --minimal linux-2.6.32.17/mm/shmem.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/shmem.c
--- linux-2.6.32.17/mm/shmem.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/shmem.c	2009-12-03 20:04:56.000000000 +0100
@@ -1781,7 +1781,7 @@ static int shmem_statfs(struct dentry *d
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
 
-	buf->f_type = TMPFS_MAGIC;
+	buf->f_type = TMPFS_SUPER_MAGIC;
 	buf->f_bsize = PAGE_CACHE_SIZE;
 	buf->f_namelen = NAME_MAX;
 	spin_lock(&sbinfo->stat_lock);
@@ -2346,7 +2346,7 @@ int shmem_fill_super(struct super_block 
 	sb->s_maxbytes = SHMEM_MAX_BYTES;
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-	sb->s_magic = TMPFS_MAGIC;
+	sb->s_magic = TMPFS_SUPER_MAGIC;
 	sb->s_op = &shmem_ops;
 	sb->s_time_gran = 1;
 #ifdef CONFIG_TMPFS_POSIX_ACL
diff -NurpP --minimal linux-2.6.32.17/mm/slab.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/slab.c
--- linux-2.6.32.17/mm/slab.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/slab.c	2010-03-18 16:53:06.000000000 +0100
@@ -431,6 +431,8 @@ static void kmem_list3_init(struct kmem_
 #define STATS_INC_FREEMISS(x)	do { } while (0)
 #endif
 
+#include "slab_vs.h"
+
 #if DEBUG
 
 /*
@@ -3251,6 +3253,7 @@ retry:
 
 	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
+	vx_slab_alloc(cachep, flags);
 	l3->free_objects--;
 	/* move slabp to correct slabp list: */
 	list_del(&slabp->list);
@@ -3327,6 +3330,7 @@ __cache_alloc_node(struct kmem_cache *ca
 	/* ___cache_alloc_node can fall back to other nodes */
 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
   out:
+	vx_slab_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
@@ -3513,6 +3517,7 @@ static inline void __cache_free(struct k
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+	vx_slab_free(cachep);
 
 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
 
diff -NurpP --minimal linux-2.6.32.17/mm/slab_vs.h linux-2.6.32.17-vs2.3.0.36.29.4/mm/slab_vs.h
--- linux-2.6.32.17/mm/slab_vs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/slab_vs.h	2009-12-03 20:04:56.000000000 +0100
@@ -0,0 +1,29 @@
+
+#include <linux/vserver/context.h>
+
+#include <linux/vs_context.h>
+
+static inline
+void vx_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	int what = gfp_zone(cachep->gfpflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_add(cachep->buffer_size, &vxi->cacct.slab[what]);
+}
+
+static inline
+void vx_slab_free(struct kmem_cache *cachep)
+{
+	int what = gfp_zone(cachep->gfpflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_sub(cachep->buffer_size, &vxi->cacct.slab[what]);
+}
+
diff -NurpP --minimal linux-2.6.32.17/mm/swapfile.c linux-2.6.32.17-vs2.3.0.36.29.4/mm/swapfile.c
--- linux-2.6.32.17/mm/swapfile.c	2009-12-03 20:02:58.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/mm/swapfile.c	2009-12-03 20:04:56.000000000 +0100
@@ -34,6 +34,8 @@
 #include <asm/tlbflush.h>
 #include <linux/swapops.h>
 #include <linux/page_cgroup.h>
+#include <linux/vs_base.h>
+#include <linux/vs_memory.h>
 
 static DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
@@ -1680,6 +1682,8 @@ static void *swap_next(struct seq_file *
 	if (v == SEQ_START_TOKEN)
 		ptr = swap_info;
 	else {
+		if (vx_flags(VXF_VIRT_MEM, 0))
+			return NULL;
 		ptr = v;
 		ptr++;
 	}
@@ -1707,6 +1711,16 @@ static int swap_show(struct seq_file *sw
 
 	if (ptr == SEQ_START_TOKEN) {
 		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+		if (vx_flags(VXF_VIRT_MEM, 0)) {
+			struct sysinfo si;
+
+			vx_vsi_swapinfo(&si);
+			if (si.totalswap < (1 << 10))
+				return 0;
+			seq_printf(swap, "%s\t\t\t\t\t%s\t%lu\t%lu\t%d\n",
+				"hdv0", "partition", si.totalswap >> 10,
+				(si.totalswap - si.freeswap) >> 10, -1);
+		}
 		return 0;
 	}
 
@@ -2064,6 +2078,8 @@ void si_swapinfo(struct sysinfo *val)
 	val->freeswap = nr_swap_pages + nr_to_be_unused;
 	val->totalswap = total_swap_pages + nr_to_be_unused;
 	spin_unlock(&swap_lock);
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_swapinfo(val);
 }
 
 /*
diff -NurpP --minimal linux-2.6.32.17/net/core/dev.c linux-2.6.32.17-vs2.3.0.36.29.4/net/core/dev.c
--- linux-2.6.32.17/net/core/dev.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/core/dev.c	2010-03-18 16:53:06.000000000 +0100
@@ -126,6 +126,7 @@
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <linux/vs_inet.h>
 #include <trace/events/napi.h>
 
 #include "net-sysfs.h"
@@ -591,7 +592,8 @@ struct net_device *__dev_get_by_name(str
 	hlist_for_each(p, dev_name_hash(net, name)) {
 		struct net_device *dev
 			= hlist_entry(p, struct net_device, name_hlist);
-		if (!strncmp(dev->name, name, IFNAMSIZ))
+		if (!strncmp(dev->name, name, IFNAMSIZ) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 	}
 	return NULL;
@@ -642,7 +644,8 @@ struct net_device *__dev_get_by_index(st
 	hlist_for_each(p, dev_index_hash(net, ifindex)) {
 		struct net_device *dev
 			= hlist_entry(p, struct net_device, index_hlist);
-		if (dev->ifindex == ifindex)
+		if ((dev->ifindex == ifindex) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 	}
 	return NULL;
@@ -695,10 +698,12 @@ struct net_device *dev_getbyhwaddr(struc
 
 	ASSERT_RTNL();
 
-	for_each_netdev(net, dev)
+	for_each_netdev(net, dev) {
 		if (dev->type == type &&
-		    !memcmp(dev->dev_addr, ha, dev->addr_len))
+		    !memcmp(dev->dev_addr, ha, dev->addr_len) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
+	}
 
 	return NULL;
 }
@@ -709,9 +714,11 @@ struct net_device *__dev_getfirstbyhwtyp
 	struct net_device *dev;
 
 	ASSERT_RTNL();
-	for_each_netdev(net, dev)
-		if (dev->type == type)
+	for_each_netdev(net, dev) {
+		if ((dev->type == type) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
+	}
 
 	return NULL;
 }
@@ -830,6 +837,8 @@ static int __dev_alloc_name(struct net *
 				continue;
 			if (i < 0 || i >= max_netdevices)
 				continue;
+			if (!nx_dev_visible(current_nx_info(), d))
+				continue;
 
 			/*  avoid cases where sscanf is not exact inverse of printf */
 			snprintf(buf, IFNAMSIZ, name, i);
@@ -2984,6 +2993,8 @@ static int dev_ifconf(struct net *net, c
 
 	total = 0;
 	for_each_netdev(net, dev) {
+		if (!nx_dev_visible(current_nx_info(), dev))
+			continue;
 		for (i = 0; i < NPROTO; i++) {
 			if (gifconf_list[i]) {
 				int done;
@@ -3052,6 +3063,9 @@ static void dev_seq_printf_stats(struct 
 {
 	const struct net_device_stats *stats = dev_get_stats(dev);
 
+	if (!nx_dev_visible(current_nx_info(), dev))
+		return;
+
 	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
 		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
 		   dev->name, stats->rx_bytes, stats->rx_packets,
@@ -5317,7 +5331,6 @@ int dev_change_net_namespace(struct net_
 	if (dev->dev.parent)
 		goto out;
 #endif
-
 	/* Ensure the device has been registrered */
 	err = -EINVAL;
 	if (dev->reg_state != NETREG_REGISTERED)
diff -NurpP --minimal linux-2.6.32.17/net/core/rtnetlink.c linux-2.6.32.17-vs2.3.0.36.29.4/net/core/rtnetlink.c
--- linux-2.6.32.17/net/core/rtnetlink.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/core/rtnetlink.c	2009-12-29 00:36:26.000000000 +0100
@@ -688,6 +688,8 @@ static int rtnl_dump_ifinfo(struct sk_bu
 
 	idx = 0;
 	for_each_netdev(net, dev) {
+		if (!nx_dev_visible(skb->sk->sk_nx_info, dev))
+			continue;
 		if (idx < s_idx)
 			goto cont;
 		if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1222,6 +1224,9 @@ void rtmsg_ifinfo(int type, struct net_d
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
+	if (!nx_dev_visible(current_nx_info(), dev))
+		return;
+
 	skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
diff -NurpP --minimal linux-2.6.32.17/net/core/sock.c linux-2.6.32.17-vs2.3.0.36.29.4/net/core/sock.c
--- linux-2.6.32.17/net/core/sock.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/core/sock.c	2010-02-12 10:59:55.000000000 +0100
@@ -125,6 +125,10 @@
 #include <linux/ipsec.h>
 
 #include <linux/filter.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
 
 #ifdef CONFIG_INET
 #include <net/tcp.h>
@@ -984,6 +988,8 @@ static struct sock *sk_prot_alloc(struct
 		if (!try_module_get(prot->owner))
 			goto out_free_sec;
 	}
+		sock_vx_init(sk);
+		sock_nx_init(sk);
 
 	return sk;
 
@@ -1063,6 +1069,11 @@ static void __sk_free(struct sock *sk)
 		       __func__, atomic_read(&sk->sk_omem_alloc));
 
 	put_net(sock_net(sk));
+	vx_sock_dec(sk);
+	clr_vx_info(&sk->sk_vx_info);
+	sk->sk_xid = -1;
+	clr_nx_info(&sk->sk_nx_info);
+	sk->sk_nid = -1;
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
@@ -1110,6 +1121,8 @@ struct sock *sk_clone(const struct sock 
 
 		/* SANITY */
 		get_net(sock_net(newsk));
+		sock_vx_init(newsk);
+		sock_nx_init(newsk);
 		sk_node_init(&newsk->sk_node);
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
@@ -1164,6 +1177,12 @@ struct sock *sk_clone(const struct sock 
 		smp_wmb();
 		atomic_set(&newsk->sk_refcnt, 2);
 
+		set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
+		newsk->sk_xid = sk->sk_xid;
+		vx_sock_inc(newsk);
+		set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
+		newsk->sk_nid = sk->sk_nid;
+
 		/*
 		 * Increment the counter in the same struct proto as the master
 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
@@ -1886,6 +1905,12 @@ void sock_init_data(struct socket *sock,
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	set_vx_info(&sk->sk_vx_info, current_vx_info());
+	sk->sk_xid = vx_current_xid();
+	vx_sock_inc(sk);
+	set_nx_info(&sk->sk_nx_info, current_nx_info());
+	sk->sk_nid = nx_current_nid();
+
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory
 	 * (Documentation/RCU/rculist_nulls.txt for details)
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/af_inet.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/af_inet.c
--- linux-2.6.32.17/net/ipv4/af_inet.c	2009-12-03 20:02:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/af_inet.c	2009-12-03 20:04:56.000000000 +0100
@@ -115,6 +115,7 @@
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
 #endif
+#include <linux/vs_limit.h>
 
 
 /* The inetsw table contains everything that inet_create needs to
@@ -325,9 +326,12 @@ lookup_protocol:
 	}
 
 	err = -EPERM;
+	if ((protocol == IPPROTO_ICMP) &&
+		nx_capable(answer->capability, NXC_RAW_ICMP))
+		goto override;
 	if (answer->capability > 0 && !capable(answer->capability))
 		goto out_rcu_unlock;
-
+override:
 	err = -EAFNOSUPPORT;
 	if (!inet_netns_ok(net, protocol))
 		goto out_rcu_unlock;
@@ -447,6 +451,7 @@ int inet_bind(struct socket *sock, struc
 	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
 	struct sock *sk = sock->sk;
 	struct inet_sock *inet = inet_sk(sk);
+	struct nx_v4_sock_addr nsa;
 	unsigned short snum;
 	int chk_addr_ret;
 	int err;
@@ -460,7 +465,11 @@ int inet_bind(struct socket *sock, struc
 	if (addr_len < sizeof(struct sockaddr_in))
 		goto out;
 
-	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+	err = v4_map_sock_addr(inet, addr, &nsa);
+	if (err)
+		goto out;
+
+	chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr);
 
 	/* Not specified by any standard per-se, however it breaks too
 	 * many applications when removed.  It is unfortunate since
@@ -472,7 +481,7 @@ int inet_bind(struct socket *sock, struc
 	err = -EADDRNOTAVAIL;
 	if (!sysctl_ip_nonlocal_bind &&
 	    !(inet->freebind || inet->transparent) &&
-	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
+	    nsa.saddr != htonl(INADDR_ANY) &&
 	    chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST &&
 	    chk_addr_ret != RTN_BROADCAST)
@@ -497,7 +506,7 @@ int inet_bind(struct socket *sock, struc
 	if (sk->sk_state != TCP_CLOSE || inet->num)
 		goto out_release_sock;
 
-	inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
+	v4_set_sock_addr(inet, &nsa);
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->saddr = 0;  /* Use device */
 
@@ -694,11 +703,13 @@ int inet_getname(struct socket *sock, st
 		     peer == 1))
 			return -ENOTCONN;
 		sin->sin_port = inet->dport;
-		sin->sin_addr.s_addr = inet->daddr;
+		sin->sin_addr.s_addr =
+			nx_map_sock_lback(sk->sk_nx_info, inet->daddr);
 	} else {
 		__be32 addr = inet->rcv_saddr;
 		if (!addr)
 			addr = inet->saddr;
+		addr = nx_map_sock_lback(sk->sk_nx_info, addr);
 		sin->sin_port = inet->sport;
 		sin->sin_addr.s_addr = addr;
 	}
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/devinet.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/devinet.c
--- linux-2.6.32.17/net/ipv4/devinet.c	2010-08-09 18:04:15.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/devinet.c	2010-03-18 16:53:06.000000000 +0100
@@ -413,6 +413,7 @@ struct in_device *inetdev_by_index(struc
 	return in_dev;
 }
 
+
 /* Called only from RTNL semaphored context. No locks. */
 
 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
@@ -653,6 +654,8 @@ int devinet_ioctl(struct net *net, unsig
 		*colon = ':';
 
 	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+		struct nx_info *nxi = current_nx_info();
+
 		if (tryaddrmatch) {
 			/* Matthias Andree */
 			/* compare label and address (4.4BSD style) */
@@ -661,6 +664,8 @@ int devinet_ioctl(struct net *net, unsig
 			   This is checked above. */
 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
 			     ifap = &ifa->ifa_next) {
+				if (!nx_v4_ifa_visible(nxi, ifa))
+					continue;
 				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
 				    sin_orig.sin_addr.s_addr ==
 							ifa->ifa_address) {
@@ -673,9 +678,12 @@ int devinet_ioctl(struct net *net, unsig
 		   comparing just the label */
 		if (!ifa) {
 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
-			     ifap = &ifa->ifa_next)
+			     ifap = &ifa->ifa_next) {
+				if (!nx_v4_ifa_visible(nxi, ifa))
+					continue;
 				if (!strcmp(ifr.ifr_name, ifa->ifa_label))
 					break;
+			}
 		}
 	}
 
@@ -826,6 +834,8 @@ static int inet_gifconf(struct net_devic
 		goto out;
 
 	for (; ifa; ifa = ifa->ifa_next) {
+		if (!nx_v4_ifa_visible(current_nx_info(), ifa))
+			continue;
 		if (!buf) {
 			done += sizeof(ifr);
 			continue;
@@ -1174,6 +1184,7 @@ static int inet_dump_ifaddr(struct sk_bu
 	struct net_device *dev;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
+	struct sock *sk = skb->sk;
 	int s_ip_idx, s_idx = cb->args[0];
 
 	s_ip_idx = ip_idx = cb->args[1];
@@ -1188,6 +1199,8 @@ static int inet_dump_ifaddr(struct sk_bu
 
 		for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
 		     ifa = ifa->ifa_next, ip_idx++) {
+			if (sk && !nx_v4_ifa_visible(sk->sk_nx_info, ifa))
+				continue;
 			if (ip_idx < s_ip_idx)
 				continue;
 			if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/fib_hash.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/fib_hash.c
--- linux-2.6.32.17/net/ipv4/fib_hash.c	2009-09-10 15:26:29.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/fib_hash.c	2009-12-03 20:04:56.000000000 +0100
@@ -1021,7 +1021,7 @@ static int fib_seq_show(struct seq_file 
 	prefix	= f->fn_key;
 	mask	= FZ_MASK(iter->zone);
 	flags	= fib_flag_trans(fa->fa_type, mask, fi);
-	if (fi)
+	if (fi && nx_dev_visible(current_nx_info(), fi->fib_dev))
 		seq_printf(seq,
 			 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
 			 fi->fib_dev ? fi->fib_dev->name : "*", prefix,
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/inet_connection_sock.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/inet_connection_sock.c
--- linux-2.6.32.17/net/ipv4/inet_connection_sock.c	2009-12-03 20:02:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/inet_connection_sock.c	2009-12-03 20:04:56.000000000 +0100
@@ -49,10 +49,40 @@ void inet_get_local_port_range(int *low,
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
+int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+{
+	__be32	sk1_rcv_saddr = inet_rcv_saddr(sk1),
+		sk2_rcv_saddr = inet_rcv_saddr(sk2);
+
+	if (inet_v6_ipv6only(sk2))
+		return 0;
+
+	if (sk1_rcv_saddr &&
+	    sk2_rcv_saddr &&
+	    sk1_rcv_saddr == sk2_rcv_saddr)
+		return 1;
+
+	if (sk1_rcv_saddr &&
+	    !sk2_rcv_saddr &&
+	    v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, NXA_MASK_BIND))
+		return 1;
+
+	if (sk2_rcv_saddr &&
+	    !sk1_rcv_saddr &&
+	    v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, NXA_MASK_BIND))
+		return 1;
+
+	if (!sk1_rcv_saddr &&
+	    !sk2_rcv_saddr &&
+	    nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info))
+		return 1;
+
+	return 0;
+}
+
 int inet_csk_bind_conflict(const struct sock *sk,
 			   const struct inet_bind_bucket *tb)
 {
-	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
 	struct sock *sk2;
 	struct hlist_node *node;
 	int reuse = sk->sk_reuse;
@@ -72,9 +102,7 @@ int inet_csk_bind_conflict(const struct 
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
 			    sk2->sk_state == TCP_LISTEN) {
-				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
-				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
-				    sk2_rcv_saddr == sk_rcv_saddr)
+				if (ipv4_rcv_saddr_equal(sk, sk2))
 					break;
 			}
 		}
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/inet_diag.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/inet_diag.c
--- linux-2.6.32.17/net/ipv4/inet_diag.c	2009-09-10 15:26:29.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/inet_diag.c	2009-12-03 20:04:56.000000000 +0100
@@ -32,6 +32,8 @@
 #include <linux/stddef.h>
 
 #include <linux/inet_diag.h>
+#include <linux/vs_network.h>
+#include <linux/vs_inet.h>
 
 static const struct inet_diag_handler **inet_diag_table;
 
@@ -118,8 +120,8 @@ static int inet_csk_diag_fill(struct soc
 
 	r->id.idiag_sport = inet->sport;
 	r->id.idiag_dport = inet->dport;
-	r->id.idiag_src[0] = inet->rcv_saddr;
-	r->id.idiag_dst[0] = inet->daddr;
+	r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, inet->rcv_saddr);
+	r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, inet->daddr);
 
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
 	if (r->idiag_family == AF_INET6) {
@@ -204,8 +206,8 @@ static int inet_twsk_diag_fill(struct in
 	r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
 	r->id.idiag_sport     = tw->tw_sport;
 	r->id.idiag_dport     = tw->tw_dport;
-	r->id.idiag_src[0]    = tw->tw_rcv_saddr;
-	r->id.idiag_dst[0]    = tw->tw_daddr;
+	r->id.idiag_src[0]    = nx_map_sock_lback(tw->tw_nx_info, tw->tw_rcv_saddr);
+	r->id.idiag_dst[0]    = nx_map_sock_lback(tw->tw_nx_info, tw->tw_daddr);
 	r->idiag_state	      = tw->tw_substate;
 	r->idiag_timer	      = 3;
 	r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
@@ -262,6 +264,7 @@ static int inet_diag_get_exact(struct sk
 	err = -EINVAL;
 
 	if (req->idiag_family == AF_INET) {
+		/* TODO: lback */
 		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
 				 req->id.idiag_dport, req->id.idiag_src[0],
 				 req->id.idiag_sport, req->id.idiag_if);
@@ -504,6 +507,7 @@ static int inet_csk_diag_dump(struct soc
 		} else
 #endif
 		{
+			/* TODO: lback */
 			entry.saddr = &inet->rcv_saddr;
 			entry.daddr = &inet->daddr;
 		}
@@ -540,6 +544,7 @@ static int inet_twsk_diag_dump(struct in
 		} else
 #endif
 		{
+			/* TODO: lback */
 			entry.saddr = &tw->tw_rcv_saddr;
 			entry.daddr = &tw->tw_daddr;
 		}
@@ -586,8 +591,8 @@ static int inet_diag_fill_req(struct sk_
 
 	r->id.idiag_sport = inet->sport;
 	r->id.idiag_dport = ireq->rmt_port;
-	r->id.idiag_src[0] = ireq->loc_addr;
-	r->id.idiag_dst[0] = ireq->rmt_addr;
+	r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->loc_addr);
+	r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->rmt_addr);
 	r->idiag_expires = jiffies_to_msecs(tmo);
 	r->idiag_rqueue = 0;
 	r->idiag_wqueue = 0;
@@ -657,6 +662,7 @@ static int inet_diag_dump_reqs(struct sk
 				continue;
 
 			if (bc) {
+				/* TODO: lback */
 				entry.saddr =
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
 					(entry.family == AF_INET6) ?
@@ -727,6 +733,8 @@ static int inet_diag_dump(struct sk_buff
 			sk_nulls_for_each(sk, node, &ilb->head) {
 				struct inet_sock *inet = inet_sk(sk);
 
+				if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+					continue;
 				if (num < s_num) {
 					num++;
 					continue;
@@ -793,6 +801,8 @@ skip_listen_ht:
 		sk_nulls_for_each(sk, node, &head->chain) {
 			struct inet_sock *inet = inet_sk(sk);
 
+			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (num < s_num)
 				goto next_normal;
 			if (!(r->idiag_states & (1 << sk->sk_state)))
@@ -817,6 +827,8 @@ next_normal:
 			inet_twsk_for_each(tw, node,
 				    &head->twchain) {
 
+				if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
+					continue;
 				if (num < s_num)
 					goto next_dying;
 				if (r->id.idiag_sport != tw->tw_sport &&
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/inet_hashtables.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/inet_hashtables.c
--- linux-2.6.32.17/net/ipv4/inet_hashtables.c	2009-06-11 17:13:29.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/inet_hashtables.c	2009-12-03 20:04:56.000000000 +0100
@@ -21,6 +21,7 @@
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
+#include <net/route.h>
 #include <net/ip.h>
 
 /*
@@ -134,6 +135,11 @@ static inline int compute_score(struct s
 			if (rcv_saddr != daddr)
 				return -1;
 			score += 2;
+		} else {
+			/* block non nx_info ips */
+			if (!v4_addr_in_nx_info(sk->sk_nx_info,
+				daddr, NXA_MASK_BIND))
+				return -1;
 		}
 		if (sk->sk_bound_dev_if) {
 			if (sk->sk_bound_dev_if != dif)
@@ -151,7 +157,6 @@ static inline int compute_score(struct s
  * wildcarded during the search since they can never be otherwise.
  */
 
-
 struct sock *__inet_lookup_listener(struct net *net,
 				    struct inet_hashinfo *hashinfo,
 				    const __be32 daddr, const unsigned short hnum,
@@ -174,6 +179,7 @@ begin:
 			hiscore = score;
 		}
 	}
+
 	/*
 	 * if the nulls value we got at the end of this lookup is
 	 * not the expected one, we must restart lookup.
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/netfilter/nf_nat_helper.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/netfilter/nf_nat_helper.c
--- linux-2.6.32.17/net/ipv4/netfilter/nf_nat_helper.c	2009-12-03 20:02:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/netfilter/nf_nat_helper.c	2009-12-03 20:04:56.000000000 +0100
@@ -19,6 +19,7 @@
 #include <net/route.h>
 
 #include <linux/netfilter_ipv4.h>
+#include <net/route.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/netfilter.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/netfilter.c
--- linux-2.6.32.17/net/ipv4/netfilter.c	2009-09-10 15:26:29.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/netfilter.c	2009-12-03 20:04:56.000000000 +0100
@@ -4,7 +4,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/ip.h>
 #include <linux/skbuff.h>
-#include <net/route.h>
+// #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/netfilter/nf_queue.h>
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/raw.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/raw.c
--- linux-2.6.32.17/net/ipv4/raw.c	2009-12-03 20:02:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/raw.c	2009-12-03 20:04:56.000000000 +0100
@@ -117,7 +117,7 @@ static struct sock *__raw_v4_lookup(stru
 
 		if (net_eq(sock_net(sk), net) && inet->num == num	&&
 		    !(inet->daddr && inet->daddr != raddr) 		&&
-		    !(inet->rcv_saddr && inet->rcv_saddr != laddr)	&&
+		    v4_sock_addr_match(sk->sk_nx_info, inet, laddr)	&&
 		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
 			goto found; /* gotcha */
 	}
@@ -383,6 +383,12 @@ static int raw_send_hdrinc(struct sock *
 		icmp_out_count(net, ((struct icmphdr *)
 			skb_transport_header(skb))->type);
 
+	err = -EPERM;
+	if (!nx_check(0, VS_ADMIN) && !capable(CAP_NET_RAW) &&
+		sk->sk_nx_info &&
+		!v4_addr_in_nx_info(sk->sk_nx_info, iph->saddr, NXA_MASK_BIND))
+		goto error_free;
+
 	err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
 		      dst_output);
 	if (err > 0)
@@ -563,6 +569,13 @@ static int raw_sendmsg(struct kiocb *ioc
 		}
 
 		security_sk_classify_flow(sk, &fl);
+		if (sk->sk_nx_info) {
+			err = ip_v4_find_src(sock_net(sk),
+				sk->sk_nx_info, &rt, &fl);
+
+			if (err)
+				goto done;
+		}
 		err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
 	}
 	if (err)
@@ -635,17 +648,19 @@ static int raw_bind(struct sock *sk, str
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+	struct nx_v4_sock_addr nsa = { 0 };
 	int ret = -EINVAL;
 	int chk_addr_ret;
 
 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
 		goto out;
-	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+	v4_map_sock_addr(inet, addr, &nsa);
+	chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr);
 	ret = -EADDRNOTAVAIL;
-	if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
+	if (nsa.saddr && chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
 		goto out;
-	inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
+	v4_set_sock_addr(inet, &nsa);
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->saddr = 0;  /* Use device */
 	sk_dst_reset(sk);
@@ -697,7 +712,8 @@ static int raw_recvmsg(struct kiocb *ioc
 	/* Copy the address. */
 	if (sin) {
 		sin->sin_family = AF_INET;
-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+		sin->sin_addr.s_addr =
+			nx_map_sock_lback(sk->sk_nx_info, ip_hdr(skb)->saddr);
 		sin->sin_port = 0;
 		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
 	}
@@ -875,7 +891,8 @@ static struct sock *raw_get_first(struct
 		struct hlist_node *node;
 
 		sk_for_each(sk, node, &state->h->ht[state->bucket])
-			if (sock_net(sk) == seq_file_net(seq))
+			if ((sock_net(sk) == seq_file_net(seq)) &&
+				nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
 				goto found;
 	}
 	sk = NULL;
@@ -891,7 +908,8 @@ static struct sock *raw_get_next(struct 
 		sk = sk_next(sk);
 try_again:
 		;
-	} while (sk && sock_net(sk) != seq_file_net(seq));
+	} while (sk && ((sock_net(sk) != seq_file_net(seq)) ||
+		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
 
 	if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
 		sk = sk_head(&state->h->ht[state->bucket]);
@@ -950,7 +968,10 @@ static void raw_sock_seq_show(struct seq
 
 	seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
-		i, src, srcp, dest, destp, sp->sk_state,
+		i,
+		nx_map_sock_lback(current_nx_info(), src), srcp,
+		nx_map_sock_lback(current_nx_info(), dest), destp,
+		sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
 		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/tcp.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/tcp.c
--- linux-2.6.32.17/net/ipv4/tcp.c	2009-12-03 20:02:59.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/tcp.c	2009-12-03 20:04:56.000000000 +0100
@@ -264,6 +264,7 @@
 #include <linux/cache.h>
 #include <linux/err.h>
 #include <linux/crypto.h>
+#include <linux/in.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/tcp_ipv4.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/tcp_ipv4.c
--- linux-2.6.32.17/net/ipv4/tcp_ipv4.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/tcp_ipv4.c	2009-12-03 20:04:56.000000000 +0100
@@ -1925,6 +1925,12 @@ static void *listening_get_next(struct s
 		req = req->dl_next;
 		while (1) {
 			while (req) {
+				vxdprintk(VXD_CBIT(net, 6),
+					"sk,req: %p [#%d] (from %d)", req->sk,
+					(req->sk)?req->sk->sk_nid:0, nx_current_nid());
+				if (req->sk &&
+					!nx_check(req->sk->sk_nid, VS_WATCH_P | VS_IDENT))
+					{ req = req->dl_next; continue; }
 				if (req->rsk_ops->family == st->family) {
 					cur = req;
 					goto out;
@@ -1949,6 +1955,10 @@ get_req:
 	}
 get_sk:
 	sk_nulls_for_each_from(sk, node) {
+		vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
+			sk, sk->sk_nid, nx_current_nid());
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
 			cur = sk;
 			goto out;
@@ -2012,6 +2022,11 @@ static void *established_get_first(struc
 
 		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
+			vxdprintk(VXD_CBIT(net, 6),
+				"sk,egf: %p [#%d] (from %d)",
+				sk, sk->sk_nid, nx_current_nid());
+			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (sk->sk_family != st->family ||
 			    !net_eq(sock_net(sk), net)) {
 				continue;
@@ -2022,6 +2037,11 @@ static void *established_get_first(struc
 		st->state = TCP_SEQ_STATE_TIME_WAIT;
 		inet_twsk_for_each(tw, node,
 				   &tcp_hashinfo.ehash[st->bucket].twchain) {
+			vxdprintk(VXD_CBIT(net, 6),
+				"tw: %p [#%d] (from %d)",
+				tw, tw->tw_nid, nx_current_nid());
+			if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (tw->tw_family != st->family ||
 			    !net_eq(twsk_net(tw), net)) {
 				continue;
@@ -2050,7 +2070,9 @@ static void *established_get_next(struct
 		tw = cur;
 		tw = tw_next(tw);
 get_tw:
-		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
+		while (tw && (tw->tw_family != st->family ||
+			!net_eq(twsk_net(tw), net) ||
+			!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))) {
 			tw = tw_next(tw);
 		}
 		if (tw) {
@@ -2073,6 +2095,11 @@ get_tw:
 		sk = sk_nulls_next(sk);
 
 	sk_nulls_for_each_from(sk, node) {
+		vxdprintk(VXD_CBIT(net, 6),
+			"sk,egn: %p [#%d] (from %d)",
+			sk, sk->sk_nid, nx_current_nid());
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
 			goto found;
 	}
@@ -2224,9 +2251,9 @@ static void get_openreq4(struct sock *sk
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
 		i,
-		ireq->loc_addr,
+		nx_map_sock_lback(current_nx_info(), ireq->loc_addr),
 		ntohs(inet_sk(sk)->sport),
-		ireq->rmt_addr,
+		nx_map_sock_lback(current_nx_info(), ireq->rmt_addr),
 		ntohs(ireq->rmt_port),
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
@@ -2269,7 +2296,10 @@ static void get_tcp4_sock(struct sock *s
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
-		i, src, srcp, dest, destp, sk->sk_state,
+		i,
+		nx_map_sock_lback(current_nx_info(), src), srcp,
+		nx_map_sock_lback(current_nx_info(), dest), destp,
+		sk->sk_state,
 		tp->write_seq - tp->snd_una,
 		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
 					     (tp->rcv_nxt - tp->copied_seq),
@@ -2305,7 +2335,10 @@ static void get_timewait4_sock(struct in
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
-		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
+		i,
+		nx_map_sock_lback(current_nx_info(), src), srcp,
+		nx_map_sock_lback(current_nx_info(), dest), destp,
+		tw->tw_substate, 0, 0,
 		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
 		atomic_read(&tw->tw_refcnt), tw, len);
 }
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/tcp_minisocks.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/tcp_minisocks.c
--- linux-2.6.32.17/net/ipv4/tcp_minisocks.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/tcp_minisocks.c	2009-12-03 20:04:56.000000000 +0100
@@ -26,6 +26,10 @@
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
+#include <linux/vs_limit.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_context.h>
+
 #ifdef CONFIG_SYSCTL
 #define SYNC_INIT 0 /* let the user enable it */
 #else
@@ -294,6 +298,11 @@ void tcp_time_wait(struct sock *sk, int 
 		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
 		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 
+		tw->tw_xid		= sk->sk_xid;
+		tw->tw_vx_info		= NULL;
+		tw->tw_nid		= sk->sk_nid;
+		tw->tw_nx_info		= NULL;
+
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 		if (tw->tw_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
diff -NurpP --minimal linux-2.6.32.17/net/ipv4/udp.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/udp.c
--- linux-2.6.32.17/net/ipv4/udp.c	2010-08-09 18:04:16.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv4/udp.c	2010-05-27 19:21:36.000000000 +0200
@@ -224,14 +224,7 @@ fail:
 }
 EXPORT_SYMBOL(udp_lib_get_port);
 
-static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
-{
-	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
-
-	return 	(!ipv6_only_sock(sk2)  &&
-		 (!inet1->rcv_saddr || !inet2->rcv_saddr ||
-		   inet1->rcv_saddr == inet2->rcv_saddr));
-}
+extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
 
 int udp_v4_get_port(struct sock *sk, unsigned short snum)
 {
@@ -253,6 +246,11 @@ static inline int compute_score(struct s
 			if (inet->rcv_saddr != daddr)
 				return -1;
 			score += 2;
+		} else {
+			/* block non nx_info ips */
+			if (!v4_addr_in_nx_info(sk->sk_nx_info,
+				daddr, NXA_MASK_BIND))
+				return -1;
 		}
 		if (inet->daddr) {
 			if (inet->daddr != saddr)
@@ -273,6 +271,7 @@ static inline int compute_score(struct s
 	return score;
 }
 
+
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
@@ -294,6 +293,11 @@ begin:
 	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
 		score = compute_score(sk, net, saddr, hnum, sport,
 				      daddr, dport, dif);
+		/* FIXME: disabled?
+		if (score == 9) {
+			result = sk;
+			break;
+		} else */
 		if (score > badness) {
 			result = sk;
 			badness = score;
@@ -307,6 +311,7 @@ begin:
 	if (get_nulls_value(node) != hash)
 		goto begin;
 
+
 	if (result) {
 		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
 			result = NULL;
@@ -316,6 +321,7 @@ begin:
 			goto begin;
 		}
 	}
+
 	rcu_read_unlock();
 	return result;
 }
@@ -358,7 +364,7 @@ static inline struct sock *udp_v4_mcast_
 		    s->sk_hash != hnum					||
 		    (inet->daddr && inet->daddr != rmt_addr)		||
 		    (inet->dport != rmt_port && inet->dport)		||
-		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr)	||
+		    !v4_sock_addr_match(sk->sk_nx_info, inet, loc_addr)	||
 		    ipv6_only_sock(s)					||
 		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
 			continue;
@@ -707,8 +713,13 @@ int udp_sendmsg(struct kiocb *iocb, stru
 					       { .sport = inet->sport,
 						 .dport = dport } } };
 		struct net *net = sock_net(sk);
+		struct nx_info *nxi = sk->sk_nx_info;
 
 		security_sk_classify_flow(sk, &fl);
+		err = ip_v4_find_src(net, nxi, &rt, &fl);
+		if (err)
+			goto out;
+
 		err = ip_route_output_flow(net, &rt, &fl, sk, 1);
 		if (err) {
 			if (err == -ENETUNREACH)
@@ -988,7 +999,8 @@ try_again:
 	if (sin) {
 		sin->sin_family = AF_INET;
 		sin->sin_port = udp_hdr(skb)->source;
-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+		sin->sin_addr.s_addr = nx_map_sock_lback(
+			skb->sk->sk_nx_info, ip_hdr(skb)->saddr);
 		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
 	}
 	if (inet->cmsg_flags)
@@ -1627,6 +1639,8 @@ static struct sock *udp_get_first(struct
 		sk_nulls_for_each(sk, node, &hslot->head) {
 			if (!net_eq(sock_net(sk), net))
 				continue;
+			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (sk->sk_family == state->family)
 				goto found;
 		}
@@ -1644,7 +1658,9 @@ static struct sock *udp_get_next(struct 
 
 	do {
 		sk = sk_nulls_next(sk);
-	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
+	} while (sk && (!net_eq(sock_net(sk), net) ||
+		sk->sk_family != state->family ||
+		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
 
 	if (!sk) {
 		if (state->bucket < UDP_HTABLE_SIZE)
@@ -1751,7 +1767,10 @@ static void udp4_format_sock(struct sock
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
-		bucket, src, srcp, dest, destp, sp->sk_state,
+		bucket,
+		nx_map_sock_lback(current_nx_info(), src), srcp,
+		nx_map_sock_lback(current_nx_info(), dest), destp,
+		sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
 		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/addrconf.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/addrconf.c
--- linux-2.6.32.17/net/ipv6/addrconf.c	2010-08-09 18:04:16.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/addrconf.c	2010-03-18 16:53:06.000000000 +0100
@@ -86,6 +86,8 @@
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/vs_network.h>
+#include <linux/vs_inet6.h>
 
 /* Set to 3 to get tracing... */
 #define ACONF_DEBUG 2
@@ -1122,7 +1124,7 @@ out:
 
 int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
 		       const struct in6_addr *daddr, unsigned int prefs,
-		       struct in6_addr *saddr)
+		       struct in6_addr *saddr, struct nx_info *nxi)
 {
 	struct ipv6_saddr_score scores[2],
 				*score = &scores[0], *hiscore = &scores[1];
@@ -1195,6 +1197,8 @@ int ipv6_dev_get_saddr(struct net *net, 
 					       dev->name);
 				continue;
 			}
+			if (!v6_addr_in_nx_info(nxi, &score->ifa->addr, -1))
+				continue;
 
 			score->rule = -1;
 			bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
@@ -3003,7 +3007,10 @@ static void if6_seq_stop(struct seq_file
 static int if6_seq_show(struct seq_file *seq, void *v)
 {
 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
-	seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
+
+	if (nx_check(0, VS_ADMIN|VS_WATCH) ||
+	    v6_addr_in_nx_info(current_nx_info(), &ifp->addr, -1))
+		seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
 		   &ifp->addr,
 		   ifp->idev->dev->ifindex,
 		   ifp->prefix_len,
@@ -3500,6 +3507,12 @@ static int inet6_dump_addr(struct sk_buf
 	struct ifmcaddr6 *ifmca;
 	struct ifacaddr6 *ifaca;
 	struct net *net = sock_net(skb->sk);
+	struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
+
+	/* disable ipv6 on non v6 guests */
+	if (nxi && !nx_info_has_v6(nxi))
+		return skb->len;
+
 
 	s_idx = cb->args[0];
 	s_ip_idx = ip_idx = cb->args[1];
@@ -3521,6 +3534,8 @@ static int inet6_dump_addr(struct sk_buf
 			     ifa = ifa->if_next, ip_idx++) {
 				if (ip_idx < s_ip_idx)
 					continue;
+				if (!v6_addr_in_nx_info(nxi, &ifa->addr, -1))
+					continue;
 				err = inet6_fill_ifaddr(skb, ifa,
 							NETLINK_CB(cb->skb).pid,
 							cb->nlh->nlmsg_seq,
@@ -3534,6 +3549,8 @@ static int inet6_dump_addr(struct sk_buf
 			     ifmca = ifmca->next, ip_idx++) {
 				if (ip_idx < s_ip_idx)
 					continue;
+				if (!v6_addr_in_nx_info(nxi, &ifmca->mca_addr, -1))
+					continue;
 				err = inet6_fill_ifmcaddr(skb, ifmca,
 							  NETLINK_CB(cb->skb).pid,
 							  cb->nlh->nlmsg_seq,
@@ -3547,6 +3564,8 @@ static int inet6_dump_addr(struct sk_buf
 			     ifaca = ifaca->aca_next, ip_idx++) {
 				if (ip_idx < s_ip_idx)
 					continue;
+				if (!v6_addr_in_nx_info(nxi, &ifaca->aca_addr, -1))
+					continue;
 				err = inet6_fill_ifacaddr(skb, ifaca,
 							  NETLINK_CB(cb->skb).pid,
 							  cb->nlh->nlmsg_seq,
@@ -3833,12 +3852,19 @@ static int inet6_dump_ifinfo(struct sk_b
 	int s_idx = cb->args[0];
 	struct net_device *dev;
 	struct inet6_dev *idev;
+	struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
+
+	/* FIXME: maybe disable ipv6 on non v6 guests?
+	if (skb->sk && skb->sk->sk_vx_info)
+		return skb->len; */
 
 	read_lock(&dev_base_lock);
 	idx = 0;
 	for_each_netdev(net, dev) {
 		if (idx < s_idx)
 			goto cont;
+		if (!v6_dev_in_nx_info(dev, nxi))
+			goto cont;
 		if ((idev = in6_dev_get(dev)) == NULL)
 			goto cont;
 		err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid,
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/af_inet6.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/af_inet6.c
--- linux-2.6.32.17/net/ipv6/af_inet6.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/af_inet6.c	2009-12-03 20:04:56.000000000 +0100
@@ -41,6 +41,8 @@
 #include <linux/netdevice.h>
 #include <linux/icmpv6.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
 
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -158,9 +160,12 @@ lookup_protocol:
 	}
 
 	err = -EPERM;
+	if ((protocol == IPPROTO_ICMPV6) &&
+		nx_capable(answer->capability, NXC_RAW_ICMP))
+		goto override;
 	if (answer->capability > 0 && !capable(answer->capability))
 		goto out_rcu_unlock;
-
+override:
 	sock->ops = answer->ops;
 	answer_prot = answer->prot;
 	answer_no_check = answer->no_check;
@@ -259,6 +264,7 @@ int inet6_bind(struct socket *sock, stru
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct net *net = sock_net(sk);
+	struct nx_v6_sock_addr nsa;
 	__be32 v4addr = 0;
 	unsigned short snum;
 	int addr_type = 0;
@@ -270,6 +276,11 @@ int inet6_bind(struct socket *sock, stru
 
 	if (addr_len < SIN6_LEN_RFC2133)
 		return -EINVAL;
+
+	err = v6_map_sock_addr(inet, addr, &nsa);
+	if (err)
+		return err;
+
 	addr_type = ipv6_addr_type(&addr->sin6_addr);
 	if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
 		return -EINVAL;
@@ -301,6 +312,7 @@ int inet6_bind(struct socket *sock, stru
 		/* Reproduce AF_INET checks to make the bindings consitant */
 		v4addr = addr->sin6_addr.s6_addr32[3];
 		chk_addr_ret = inet_addr_type(net, v4addr);
+
 		if (!sysctl_ip_nonlocal_bind &&
 		    !(inet->freebind || inet->transparent) &&
 		    v4addr != htonl(INADDR_ANY) &&
@@ -310,6 +322,10 @@ int inet6_bind(struct socket *sock, stru
 			err = -EADDRNOTAVAIL;
 			goto out;
 		}
+		if (!v4_addr_in_nx_info(sk->sk_nx_info, v4addr, NXA_MASK_BIND)) {
+			err = -EADDRNOTAVAIL;
+			goto out;
+		}
 	} else {
 		if (addr_type != IPV6_ADDR_ANY) {
 			struct net_device *dev = NULL;
@@ -335,6 +351,11 @@ int inet6_bind(struct socket *sock, stru
 				}
 			}
 
+			if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
+				err = -EADDRNOTAVAIL;
+				goto out;
+			}
+
 			/* ipv4 addr of the socket is invalid.  Only the
 			 * unspecified and mapped address have a v4 equivalent.
 			 */
@@ -353,6 +374,8 @@ int inet6_bind(struct socket *sock, stru
 		}
 	}
 
+	v6_set_sock_addr(inet, &nsa);
+
 	inet->rcv_saddr = v4addr;
 	inet->saddr = v4addr;
 
@@ -448,9 +471,11 @@ int inet6_getname(struct socket *sock, s
 			return -ENOTCONN;
 		sin->sin6_port = inet->dport;
 		ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
+		/* FIXME: remap lback? */
 		if (np->sndflow)
 			sin->sin6_flowinfo = np->flow_label;
 	} else {
+		/* FIXME: remap lback? */
 		if (ipv6_addr_any(&np->rcv_saddr))
 			ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
 		else
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/fib6_rules.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/fib6_rules.c
--- linux-2.6.32.17/net/ipv6/fib6_rules.c	2009-09-10 15:26:30.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/fib6_rules.c	2009-12-03 20:04:56.000000000 +0100
@@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_r
 			if (ipv6_dev_get_saddr(net,
 					       ip6_dst_idev(&rt->u.dst)->dev,
 					       &flp->fl6_dst, srcprefs,
-					       &saddr))
+					       &saddr, NULL))
 				goto again;
 			if (!ipv6_prefix_equal(&saddr, &r->src.addr,
 					       r->src.plen))
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/inet6_hashtables.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/inet6_hashtables.c
--- linux-2.6.32.17/net/ipv6/inet6_hashtables.c	2009-03-24 14:22:46.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/inet6_hashtables.c	2009-12-03 20:04:56.000000000 +0100
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/random.h>
+#include <linux/vs_inet6.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
@@ -76,7 +77,6 @@ struct sock *__inet6_lookup_established(
 	unsigned int slot = hash & (hashinfo->ehash_size - 1);
 	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 
-
 	rcu_read_lock();
 begin:
 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
@@ -88,7 +88,7 @@ begin:
 				sock_put(sk);
 				goto begin;
 			}
-		goto out;
+			goto out;
 		}
 	}
 	if (get_nulls_value(node) != slot)
@@ -134,6 +134,9 @@ static int inline compute_score(struct s
 			if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
 				return -1;
 			score++;
+		} else {
+			if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
+				return -1;
 		}
 		if (sk->sk_bound_dev_if) {
 			if (sk->sk_bound_dev_if != dif)
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/ip6_output.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/ip6_output.c
--- linux-2.6.32.17/net/ipv6/ip6_output.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/ip6_output.c	2009-12-03 20:04:56.000000000 +0100
@@ -934,7 +934,7 @@ static int ip6_dst_lookup_tail(struct so
 		err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
 					 &fl->fl6_dst,
 					 sk ? inet6_sk(sk)->srcprefs : 0,
-					 &fl->fl6_src);
+					 &fl->fl6_src, sk ? sk->sk_nx_info : NULL);
 		if (err)
 			goto out_err_release;
 	}
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/Kconfig linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/Kconfig
--- linux-2.6.32.17/net/ipv6/Kconfig	2009-09-10 15:26:30.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/Kconfig	2009-12-03 20:04:56.000000000 +0100
@@ -4,8 +4,8 @@
 
 #   IPv6 as module will cause a CRASH if you try to unload it
 menuconfig IPV6
-	tristate "The IPv6 protocol"
-	default m
+	bool "The IPv6 protocol"
+	default n
 	---help---
 	  This is complemental support for the IP version 6.
 	  You will still be able to do traditional IPv4 networking as well.
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/ndisc.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/ndisc.c
--- linux-2.6.32.17/net/ipv6/ndisc.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/ndisc.c	2009-12-03 20:04:56.000000000 +0100
@@ -589,7 +589,7 @@ static void ndisc_send_na(struct net_dev
 	} else {
 		if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
 				       inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
-				       &tmpaddr))
+				       &tmpaddr, NULL /* FIXME: ? */ ))
 			return;
 		src_addr = &tmpaddr;
 	}
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/raw.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/raw.c
--- linux-2.6.32.17/net/ipv6/raw.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/raw.c	2009-12-03 20:04:56.000000000 +0100
@@ -29,6 +29,7 @@
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/vs_inet6.h>
 #include <linux/skbuff.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
@@ -281,6 +282,13 @@ static int rawv6_bind(struct sock *sk, s
 			}
 		}
 
+		if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
+			err = -EADDRNOTAVAIL;
+			if (dev)
+				dev_put(dev);
+			goto out;
+		}
+
 		/* ipv4 addr of the socket is invalid.  Only the
 		 * unspecified and mapped address have a v4 equivalent.
 		 */
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/route.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/route.c
--- linux-2.6.32.17/net/ipv6/route.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/route.c	2009-12-03 20:04:56.000000000 +0100
@@ -2257,7 +2257,8 @@ static int rt6_fill_node(struct net *net
 		struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
 		struct in6_addr saddr_buf;
 		if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
-				       dst, 0, &saddr_buf) == 0)
+			dst, 0, &saddr_buf,
+			(skb->sk ? skb->sk->sk_nx_info : NULL)) == 0)
 			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
 
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/tcp_ipv6.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/tcp_ipv6.c
--- linux-2.6.32.17/net/ipv6/tcp_ipv6.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/tcp_ipv6.c	2009-12-03 20:04:56.000000000 +0100
@@ -68,6 +68,7 @@
 
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
+#include <linux/vs_inet6.h>
 
 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
@@ -156,8 +157,15 @@ static int tcp_v6_connect(struct sock *s
 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 	 */
 
-	if(ipv6_addr_any(&usin->sin6_addr))
-		usin->sin6_addr.s6_addr[15] = 0x1;
+	if(ipv6_addr_any(&usin->sin6_addr)) {
+		struct nx_info *nxi = sk->sk_nx_info;
+
+		if (nxi && nx_info_has_v6(nxi))
+			/* FIXME: remap lback? */
+			usin->sin6_addr = nxi->v6.ip;
+		else
+			usin->sin6_addr.s6_addr[15] = 0x1;
+	}
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/udp.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/udp.c
--- linux-2.6.32.17/net/ipv6/udp.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/udp.c	2009-12-03 20:04:56.000000000 +0100
@@ -47,6 +47,7 @@
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/vs_inet6.h>
 #include "udp_impl.h"
 
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
@@ -61,24 +62,49 @@ int ipv6_rcv_saddr_equal(const struct so
 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 
 	/* if both are mapped, treat as IPv4 */
-	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
-		return (!sk2_ipv6only &&
+	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
+		if (!sk2_ipv6only &&
 			(!sk_rcv_saddr || !sk2_rcv_saddr ||
-			  sk_rcv_saddr == sk2_rcv_saddr));
+			  sk_rcv_saddr == sk2_rcv_saddr))
+			goto vs_v4;
+		else
+			return 0;
+	}
 
 	if (addr_type2 == IPV6_ADDR_ANY &&
 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
-		return 1;
+		goto vs;
 
 	if (addr_type == IPV6_ADDR_ANY &&
 	    !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
-		return 1;
+		goto vs;
 
 	if (sk2_rcv_saddr6 &&
 	    ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
-		return 1;
+		goto vs;
 
 	return 0;
+
+vs_v4:
+	if (!sk_rcv_saddr && !sk2_rcv_saddr)
+		return nx_v4_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
+	if (!sk2_rcv_saddr)
+		return v4_addr_in_nx_info(sk2->sk_nx_info, sk_rcv_saddr, -1);
+	if (!sk_rcv_saddr)
+		return v4_addr_in_nx_info(sk->sk_nx_info, sk2_rcv_saddr, -1);
+	return 1;
+vs:
+	if (addr_type2 == IPV6_ADDR_ANY && addr_type == IPV6_ADDR_ANY)
+		return nx_v6_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
+	else if (addr_type2 == IPV6_ADDR_ANY)
+		return v6_addr_in_nx_info(sk2->sk_nx_info, sk_rcv_saddr6, -1);
+	else if (addr_type == IPV6_ADDR_ANY) {
+		if (addr_type2 == IPV6_ADDR_MAPPED)
+			return nx_v4_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
+		else
+			return v6_addr_in_nx_info(sk->sk_nx_info, sk2_rcv_saddr6, -1);
+	}
+	return 1;
 }
 
 int udp_v6_get_port(struct sock *sk, unsigned short snum)
@@ -109,6 +135,10 @@ static inline int compute_score(struct s
 			if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
 				return -1;
 			score++;
+		} else {
+			/* block non nx_info ips */
+			if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
+				return -1;
 		}
 		if (!ipv6_addr_any(&np->daddr)) {
 			if (!ipv6_addr_equal(&np->daddr, saddr))
diff -NurpP --minimal linux-2.6.32.17/net/ipv6/xfrm6_policy.c linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/xfrm6_policy.c
--- linux-2.6.32.17/net/ipv6/xfrm6_policy.c	2009-12-03 20:03:00.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/ipv6/xfrm6_policy.c	2009-12-03 20:04:56.000000000 +0100
@@ -63,7 +63,7 @@ static int xfrm6_get_saddr(struct net *n
 	dev = ip6_dst_idev(dst)->dev;
 	ipv6_dev_get_saddr(dev_net(dev), dev,
 			   (struct in6_addr *)&daddr->a6, 0,
-			   (struct in6_addr *)&saddr->a6);
+			   (struct in6_addr *)&saddr->a6, NULL);
 	dst_release(dst);
 	return 0;
 }
diff -NurpP --minimal linux-2.6.32.17/net/netlink/af_netlink.c linux-2.6.32.17-vs2.3.0.36.29.4/net/netlink/af_netlink.c
--- linux-2.6.32.17/net/netlink/af_netlink.c	2009-12-03 20:03:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/netlink/af_netlink.c	2009-12-03 20:04:56.000000000 +0100
@@ -55,6 +55,9 @@
 #include <linux/types.h>
 #include <linux/audit.h>
 #include <linux/mutex.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_limit.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1899,6 +1902,8 @@ static struct sock *netlink_seq_socket_i
 			sk_for_each(s, node, &hash->table[j]) {
 				if (sock_net(s) != seq_file_net(seq))
 					continue;
+				if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+					continue;
 				if (off == pos) {
 					iter->link = i;
 					iter->hash_idx = j;
@@ -1933,7 +1938,8 @@ static void *netlink_seq_next(struct seq
 	s = v;
 	do {
 		s = sk_next(s);
-	} while (s && sock_net(s) != seq_file_net(seq));
+	} while (s && (sock_net(s) != seq_file_net(seq) ||
+		!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)));
 	if (s)
 		return s;
 
@@ -1945,7 +1951,8 @@ static void *netlink_seq_next(struct seq
 
 		for (; j <= hash->mask; j++) {
 			s = sk_head(&hash->table[j]);
-			while (s && sock_net(s) != seq_file_net(seq))
+			while (s && (sock_net(s) != seq_file_net(seq) ||
+				!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)))
 				s = sk_next(s);
 			if (s) {
 				iter->link = i;
diff -NurpP --minimal linux-2.6.32.17/net/sctp/ipv6.c linux-2.6.32.17-vs2.3.0.36.29.4/net/sctp/ipv6.c
--- linux-2.6.32.17/net/sctp/ipv6.c	2009-12-03 20:03:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/sctp/ipv6.c	2009-12-03 20:04:56.000000000 +0100
@@ -316,7 +316,8 @@ static void sctp_v6_get_saddr(struct sct
 				   dst ? ip6_dst_idev(dst)->dev : NULL,
 				   &daddr->v6.sin6_addr,
 				   inet6_sk(&sk->inet.sk)->srcprefs,
-				   &saddr->v6.sin6_addr);
+				   &saddr->v6.sin6_addr,
+				   asoc->base.sk->sk_nx_info);
 		SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %pI6\n",
 				  &saddr->v6.sin6_addr);
 		return;
diff -NurpP --minimal linux-2.6.32.17/net/socket.c linux-2.6.32.17-vs2.3.0.36.29.4/net/socket.c
--- linux-2.6.32.17/net/socket.c	2009-12-03 20:03:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/socket.c	2009-12-03 20:04:56.000000000 +0100
@@ -96,6 +96,10 @@
 
 #include <net/sock.h>
 #include <linux/netfilter.h>
+#include <linux/vs_base.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
 
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
@@ -559,7 +563,7 @@ static inline int __sock_sendmsg(struct 
 				 struct msghdr *msg, size_t size)
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
-	int err;
+	int err, len;
 
 	si->sock = sock;
 	si->scm = NULL;
@@ -570,7 +574,22 @@ static inline int __sock_sendmsg(struct 
 	if (err)
 		return err;
 
-	return sock->ops->sendmsg(iocb, sock, msg, size);
+	len = sock->ops->sendmsg(iocb, sock, msg, size);
+	if (sock->sk) {
+		if (len == size)
+			vx_sock_send(sock->sk, size);
+		else
+			vx_sock_fail(sock->sk, size);
+	}
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_sendmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 
 int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -671,7 +690,7 @@ EXPORT_SYMBOL_GPL(__sock_recv_timestamp)
 static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 				 struct msghdr *msg, size_t size, int flags)
 {
-	int err;
+	int err, len;
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
 
 	si->sock = sock;
@@ -684,7 +703,18 @@ static inline int __sock_recvmsg(struct 
 	if (err)
 		return err;
 
-	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	if ((len >= 0) && sock->sk)
+		vx_sock_recv(sock->sk, len);
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_recvmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 
 int sock_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -1155,6 +1185,13 @@ static int __sock_create(struct net *net
 	if (type < 0 || type >= SOCK_MAX)
 		return -EINVAL;
 
+	if (!nx_check(0, VS_ADMIN)) {
+		if (family == PF_INET && !current_nx_info_has_v4())
+			return -EAFNOSUPPORT;
+		if (family == PF_INET6 && !current_nx_info_has_v6())
+			return -EAFNOSUPPORT;
+	}
+
 	/* Compatibility.
 
 	   This uglymoron is moved from INET layer to here to avoid
@@ -1287,6 +1324,7 @@ SYSCALL_DEFINE3(socket, int, family, int
 	if (retval < 0)
 		goto out;
 
+	set_bit(SOCK_USER_SOCKET, &sock->flags);
 	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 	if (retval < 0)
 		goto out_release;
@@ -1328,10 +1366,12 @@ SYSCALL_DEFINE4(socketpair, int, family,
 	err = sock_create(family, type, protocol, &sock1);
 	if (err < 0)
 		goto out;
+	set_bit(SOCK_USER_SOCKET, &sock1->flags);
 
 	err = sock_create(family, type, protocol, &sock2);
 	if (err < 0)
 		goto out_release_1;
+	set_bit(SOCK_USER_SOCKET, &sock2->flags);
 
 	err = sock1->ops->socketpair(sock1, sock2);
 	if (err < 0)
diff -NurpP --minimal linux-2.6.32.17/net/sunrpc/auth.c linux-2.6.32.17-vs2.3.0.36.29.4/net/sunrpc/auth.c
--- linux-2.6.32.17/net/sunrpc/auth.c	2009-12-03 20:03:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/sunrpc/auth.c	2009-12-03 20:04:56.000000000 +0100
@@ -14,6 +14,7 @@
 #include <linux/hash.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/spinlock.h>
+#include <linux/vs_tag.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY	RPCDBG_AUTH
@@ -360,6 +361,7 @@ rpcauth_lookupcred(struct rpc_auth *auth
 	memset(&acred, 0, sizeof(acred));
 	acred.uid = cred->fsuid;
 	acred.gid = cred->fsgid;
+	acred.tag = dx_current_tag();
 	acred.group_info = get_group_info(((struct cred *)cred)->group_info);
 
 	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
@@ -400,6 +402,7 @@ rpcauth_bind_root_cred(struct rpc_task *
 	struct auth_cred acred = {
 		.uid = 0,
 		.gid = 0,
+		.tag = dx_current_tag(),
 	};
 	struct rpc_cred *ret;
 
diff -NurpP --minimal linux-2.6.32.17/net/sunrpc/auth_unix.c linux-2.6.32.17-vs2.3.0.36.29.4/net/sunrpc/auth_unix.c
--- linux-2.6.32.17/net/sunrpc/auth_unix.c	2008-12-25 00:26:37.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/sunrpc/auth_unix.c	2009-12-03 20:04:56.000000000 +0100
@@ -11,12 +11,14 @@
 #include <linux/module.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/auth.h>
+#include <linux/vs_tag.h>
 
 #define NFS_NGROUPS	16
 
 struct unx_cred {
 	struct rpc_cred		uc_base;
 	gid_t			uc_gid;
+	tag_t			uc_tag;
 	gid_t			uc_gids[NFS_NGROUPS];
 };
 #define uc_uid			uc_base.cr_uid
@@ -78,6 +80,7 @@ unx_create_cred(struct rpc_auth *auth, s
 		groups = NFS_NGROUPS;
 
 	cred->uc_gid = acred->gid;
+	cred->uc_tag = acred->tag;
 	for (i = 0; i < groups; i++)
 		cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
 	if (i < NFS_NGROUPS)
@@ -119,7 +122,9 @@ unx_match(struct auth_cred *acred, struc
 	unsigned int i;
 
 
-	if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid)
+	if (cred->uc_uid != acred->uid ||
+		cred->uc_gid != acred->gid ||
+		cred->uc_tag != acred->tag)
 		return 0;
 
 	if (acred->group_info != NULL)
@@ -142,7 +147,7 @@ unx_marshal(struct rpc_task *task, __be3
 	struct rpc_clnt	*clnt = task->tk_client;
 	struct unx_cred	*cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base);
 	__be32		*base, *hold;
-	int		i;
+	int		i, tag;
 
 	*p++ = htonl(RPC_AUTH_UNIX);
 	base = p++;
@@ -152,9 +157,12 @@ unx_marshal(struct rpc_task *task, __be3
 	 * Copy the UTS nodename captured when the client was created.
 	 */
 	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
+	tag = task->tk_client->cl_tag;
 
-	*p++ = htonl((u32) cred->uc_uid);
-	*p++ = htonl((u32) cred->uc_gid);
+	*p++ = htonl((u32) TAGINO_UID(tag,
+		cred->uc_uid, cred->uc_tag));
+	*p++ = htonl((u32) TAGINO_GID(tag,
+		cred->uc_gid, cred->uc_tag));
 	hold = p++;
 	for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
 		*p++ = htonl((u32) cred->uc_gids[i]);
diff -NurpP --minimal linux-2.6.32.17/net/sunrpc/clnt.c linux-2.6.32.17-vs2.3.0.36.29.4/net/sunrpc/clnt.c
--- linux-2.6.32.17/net/sunrpc/clnt.c	2009-12-03 20:03:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/sunrpc/clnt.c	2009-12-03 20:04:56.000000000 +0100
@@ -33,6 +33,7 @@
 #include <linux/utsname.h>
 #include <linux/workqueue.h>
 #include <linux/in6.h>
+#include <linux/vs_cvirt.h>
 
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
@@ -358,6 +359,9 @@ struct rpc_clnt *rpc_create(struct rpc_c
 	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
 		clnt->cl_chatty = 1;
 
+	/* TODO: handle RPC_CLNT_CREATE_TAGGED
+	if (args->flags & RPC_CLNT_CREATE_TAGGED)
+		clnt->cl_tag = 1; */
 	return clnt;
 }
 EXPORT_SYMBOL_GPL(rpc_create);
diff -NurpP --minimal linux-2.6.32.17/net/unix/af_unix.c linux-2.6.32.17-vs2.3.0.36.29.4/net/unix/af_unix.c
--- linux-2.6.32.17/net/unix/af_unix.c	2009-12-03 20:03:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/unix/af_unix.c	2009-12-03 20:04:56.000000000 +0100
@@ -114,6 +114,8 @@
 #include <linux/mount.h>
 #include <net/checksum.h>
 #include <linux/security.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
 
 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 static DEFINE_SPINLOCK(unix_table_lock);
@@ -258,6 +260,8 @@ static struct sock *__unix_find_socket_b
 		if (!net_eq(sock_net(s), net))
 			continue;
 
+		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (u->addr->len == len &&
 		    !memcmp(u->addr->name, sunname, len))
 			goto found;
@@ -2114,6 +2118,8 @@ static struct sock *unix_seq_idx(struct 
 	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
 		if (sock_net(s) != seq_file_net(seq))
 			continue;
+		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (off == pos)
 			return s;
 		++off;
@@ -2138,7 +2144,8 @@ static void *unix_seq_next(struct seq_fi
 		sk = first_unix_socket(&iter->i);
 	else
 		sk = next_unix_socket(&iter->i, sk);
-	while (sk && (sock_net(sk) != seq_file_net(seq)))
+	while (sk && (sock_net(sk) != seq_file_net(seq) ||
+		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)))
 		sk = next_unix_socket(&iter->i, sk);
 	return sk;
 }
diff -NurpP --minimal linux-2.6.32.17/net/x25/af_x25.c linux-2.6.32.17-vs2.3.0.36.29.4/net/x25/af_x25.c
--- linux-2.6.32.17/net/x25/af_x25.c	2009-12-03 20:03:01.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/net/x25/af_x25.c	2009-12-03 20:04:56.000000000 +0100
@@ -519,7 +519,10 @@ static int x25_create(struct net *net, s
 
 	x25 = x25_sk(sk);
 
-	sock_init_data(sock, sk);
+	sk->sk_socket = sock;
+	sk->sk_type = sock->type;
+	sk->sk_sleep = &sock->wait;
+	sock->sk = sk;
 
 	x25_init_timers(sk);
 
diff -NurpP --minimal linux-2.6.32.17/scripts/checksyscalls.sh linux-2.6.32.17-vs2.3.0.36.29.4/scripts/checksyscalls.sh
--- linux-2.6.32.17/scripts/checksyscalls.sh	2009-09-10 15:26:31.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/scripts/checksyscalls.sh	2009-12-03 20:04:56.000000000 +0100
@@ -194,7 +194,6 @@ cat << EOF
 #define __IGNORE_afs_syscall
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
-#define __IGNORE_vserver
 EOF
 }
 
diff -NurpP --minimal linux-2.6.32.17/security/commoncap.c linux-2.6.32.17-vs2.3.0.36.29.4/security/commoncap.c
--- linux-2.6.32.17/security/commoncap.c	2009-12-03 20:03:02.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/security/commoncap.c	2010-03-13 21:50:18.000000000 +0100
@@ -27,6 +27,7 @@
 #include <linux/sched.h>
 #include <linux/prctl.h>
 #include <linux/securebits.h>
+#include <linux/vs_context.h>
 
 /*
  * If a non-root user executes a setuid-root binary in
@@ -52,7 +53,7 @@ static void warn_setuid_and_fcaps_mixed(
 
 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-	NETLINK_CB(skb).eff_cap = current_cap();
+	NETLINK_CB(skb).eff_cap = vx_mbcaps(current_cap());
 	return 0;
 }
 
@@ -62,6 +63,7 @@ int cap_netlink_recv(struct sk_buff *skb
 		return -EPERM;
 	return 0;
 }
+
 EXPORT_SYMBOL(cap_netlink_recv);
 
 /**
@@ -82,7 +84,22 @@ EXPORT_SYMBOL(cap_netlink_recv);
 int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
 		int audit)
 {
-	return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+	struct vx_info *vxi = tsk->vx_info;
+
+#if 0
+	printk("cap_capable() VXF_STATE_SETUP = %llx, raised = %x, eff = %08x:%08x\n",
+		vx_info_flags(vxi, VXF_STATE_SETUP, 0),
+		cap_raised(tsk->cap_effective, cap),
+		tsk->cap_effective.cap[1], tsk->cap_effective.cap[0]);
+#endif
+
+	/* special case SETUP */
+	if (vx_info_flags(vxi, VXF_STATE_SETUP, 0) &&
+		/* FIXME: maybe use cred instead? */
+		cap_raised(tsk->cred->cap_effective, cap))
+		return 0;
+
+	return vx_cap_raised(vxi, cred->cap_effective, cap) ? 0 : -EPERM;
 }
 
 /**
@@ -618,7 +635,7 @@ int cap_inode_setxattr(struct dentry *de
 
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1)  &&
-	    !capable(CAP_SYS_ADMIN))
+		!vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
@@ -644,7 +661,7 @@ int cap_inode_removexattr(struct dentry 
 
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1)  &&
-	    !capable(CAP_SYS_ADMIN))
+		!vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
@@ -962,7 +979,8 @@ error:
  */
 int cap_syslog(int type)
 {
-	if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
+	if ((type != 3 && type != 10) &&
+		!vx_capable(CAP_SYS_ADMIN, VXC_SYSLOG))
 		return -EPERM;
 	return 0;
 }
diff -NurpP --minimal linux-2.6.32.17/security/selinux/hooks.c linux-2.6.32.17-vs2.3.0.36.29.4/security/selinux/hooks.c
--- linux-2.6.32.17/security/selinux/hooks.c	2010-08-09 18:04:16.000000000 +0200
+++ linux-2.6.32.17-vs2.3.0.36.29.4/security/selinux/hooks.c	2010-02-12 10:59:55.000000000 +0100
@@ -64,7 +64,6 @@
 #include <linux/dccp.h>
 #include <linux/quota.h>
 #include <linux/un.h>		/* for Unix socket types */
-#include <net/af_unix.h>	/* for Unix socket types */
 #include <linux/parser.h>
 #include <linux/nfs_mount.h>
 #include <net/ipv6.h>
diff -NurpP --minimal linux-2.6.32.17/security/selinux/include/av_permissions.h linux-2.6.32.17-vs2.3.0.36.29.4/security/selinux/include/av_permissions.h
--- linux-2.6.32.17/security/selinux/include/av_permissions.h	2009-12-03 20:03:02.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/security/selinux/include/av_permissions.h	2009-12-03 20:04:56.000000000 +0100
@@ -565,6 +565,7 @@
 #define CAPABILITY__SETFCAP                       0x80000000UL
 #define CAPABILITY2__MAC_OVERRIDE                 0x00000001UL
 #define CAPABILITY2__MAC_ADMIN                    0x00000002UL
+#define CAPABILITY2__CONTEXT                      0x00000004UL
 #define NETLINK_ROUTE_SOCKET__IOCTL               0x00000001UL
 #define NETLINK_ROUTE_SOCKET__READ                0x00000002UL
 #define NETLINK_ROUTE_SOCKET__WRITE               0x00000004UL
diff -NurpP --minimal linux-2.6.32.17/security/selinux/include/av_perm_to_string.h linux-2.6.32.17-vs2.3.0.36.29.4/security/selinux/include/av_perm_to_string.h
--- linux-2.6.32.17/security/selinux/include/av_perm_to_string.h	2009-12-03 20:03:02.000000000 +0100
+++ linux-2.6.32.17-vs2.3.0.36.29.4/security/selinux/include/av_perm_to_string.h	2009-12-03 20:04:56.000000000 +0100
@@ -142,6 +142,7 @@
    S_(SECCLASS_CAPABILITY, CAPABILITY__SETFCAP, "setfcap")
    S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_OVERRIDE, "mac_override")
    S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_ADMIN, "mac_admin")
+   S_(SECCLASS_CAPABILITY2, CAPABILITY2__CONTEXT, "context")
    S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ, "nlmsg_read")
    S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE, "nlmsg_write")
    S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_READ, "nlmsg_read")
