--- linux-2.6.16-rc1/arch/ia64/ia32/binfmt_elf32.c 2006-01-03 17:29:09 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/arch/ia64/ia32/binfmt_elf32.c 2006-01-21 18:28:13 +0100 @@ -199,7 +199,7 @@ ia64_elf32_init (struct pt_regs *regs) int ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack) { - unsigned long stack_base; + unsigned long stack_base, grow; struct vm_area_struct *mpnt; struct mm_struct *mm = current->mm; int i, ret; --- linux-2.6.16-rc1/arch/ia64/ia32/binfmt_elf32.c 2006-01-03 17:29:09 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/arch/ia64/ia32/binfmt_elf32.c 2006-01-21 18:28:13 +0100 @@ -236,7 +236,8 @@ ia32_setup_arg_pages (struct linux_binpr kmem_cache_free(vm_area_cachep, mpnt); return ret; } - current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt); + vx_vmpages_sub(current->mm, current->mm->total_vm - vma_pages(mpnt)); + current->mm->stack_vm = current->mm->total_vm; } for (i = 0 ; i < MAX_ARG_PAGES ; i++) { --- linux-2.6.16-rc1/arch/ia64/kernel/perfmon.c 2006-01-26 22:34:43 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/arch/ia64/kernel/perfmon.c 2006-01-21 18:28:13 +0100 @@ -2354,7 +2356,7 @@ pfm_smpl_buffer_alloc(struct task_struct */ insert_vm_struct(mm, vma); - mm->total_vm += size >> PAGE_SHIFT; + vx_vmpages_add(mm, size >> PAGE_SHIFT); vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, vma_pages(vma)); up_write(&task->mm->mmap_sem); --- linux-2.6.16-rc1/arch/powerpc/kernel/vdso.c 2006-01-03 17:29:13 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/arch/powerpc/kernel/vdso.c 2006-01-21 18:28:13 +0100 @@ -294,7 +295,7 @@ int arch_setup_additional_pages(struct l kmem_cache_free(vm_area_cachep, vma); return -ENOMEM; } - mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + vx_vmpages_add(mm, (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); up_write(&mm->mmap_sem); return 0; --- linux-2.6.16-rc1/arch/x86_64/ia32/ia32_binfmt.c 2006-01-26 22:34:50 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/arch/x86_64/ia32/ia32_binfmt.c 2006-01-22 06:32:41 +0100 
@@ -371,7 +371,8 @@ int ia32_setup_arg_pages(struct linux_bi kmem_cache_free(vm_area_cachep, mpnt); return ret; } - mm->stack_vm = mm->total_vm = vma_pages(mpnt); + vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt)); + mm->stack_vm = mm->total_vm; } for (i = 0 ; i < MAX_ARG_PAGES ; i++) { --- linux-2.6.16-rc1/fs/exec.c 2006-01-26 22:35:10 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/fs/exec.c 2006-01-21 18:28:13 +0100 @@ -436,7 +437,8 @@ int setup_arg_pages(struct linux_binprm kmem_cache_free(vm_area_cachep, mpnt); return ret; } - mm->stack_vm = mm->total_vm = vma_pages(mpnt); + vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt)); + mm->stack_vm = mm->total_vm; } for (i = 0 ; i < MAX_ARG_PAGES ; i++) { --- linux-2.6.16-rc1/include/linux/sched.h 2006-01-26 22:35:20 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/include/linux/sched.h 2006-01-29 02:45:26 +0100 @@ -257,12 +260,11 @@ extern void arch_unmap_area_topdown(stru * The mm counters are not protected by its page_table_lock, * so must be incremented atomically. */ -#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value) -#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member)) -#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member) -#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member) -#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member) typedef atomic_long_t mm_counter_t; +#define __set_mm_counter(mm, member, value) \ + atomic_long_set(&(mm)->_##member, value) +#define get_mm_counter(mm, member) \ + ((unsigned long)atomic_long_read(&(mm)->_##member)) #else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ /* --- linux-2.6.16-rc1/include/linux/sched.h 2006-01-26 22:35:20 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/include/linux/sched.h 2006-01-29 02:45:26 +0100 @@ -269,12 +271,9 @@ ***** * The mm counters are protected by its page_table_lock, * so can be incremented directly. 
*/ -#define set_mm_counter(mm, member, value) (mm)->_##member = (value) -#define get_mm_counter(mm, member) ((mm)->_##member) -#define add_mm_counter(mm, member, value) (mm)->_##member += (value) -#define inc_mm_counter(mm, member) (mm)->_##member++ -#define dec_mm_counter(mm, member) (mm)->_##member-- typedef unsigned long mm_counter_t; +#define __set_mm_counter(mm, member, value) (mm)->_##member = (value) +#define get_mm_counter(mm, member) ((mm)->_##member) #endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ --- linux-2.6.16-rc1/include/linux/sched.h 2006-01-26 22:35:20 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/include/linux/sched.h 2006-01-29 02:45:26 +0100 @@ -278,6 +277,13 @@ ***** #endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ +#define set_mm_counter(mm, member, value) \ + vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value)) +#define add_mm_counter(mm, member, value) \ + vx_ ## member ## pages_add((mm), (value)) +#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm)) +#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm)) + #define get_mm_rss(mm) \ (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss)) #define update_hiwater_rss(mm) do { \ --- linux-2.6.16-rc1/include/linux/sched.h 2006-01-26 22:35:20 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/include/linux/sched.h 2006-01-29 02:45:26 +0100 @@ -335,6 +341,7 @@ struct mm_struct { /* Architecture-specific MM context */ mm_context_t context; + struct vx_info *mm_vx_info; /* Token based thrashing protection. 
*/ unsigned long swap_token_time; --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -201,6 +206,8 @@ static inline int dup_mmap(struct mm_str mm->free_area_cache = oldmm->mmap_base; mm->cached_hole_size = ~0UL; mm->map_count = 0; + __set_mm_counter(mm, file_rss, 0); + __set_mm_counter(mm, anon_rss, 0); cpus_clear(mm->cpu_vm_mask); mm->mm_rb = RB_ROOT; rb_link = &mm->mm_rb.rb_node; --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -212,7 +219,8 @@ static inline int dup_mmap(struct mm_str if (mpnt->vm_flags & VM_DONTCOPY) { long pages = vma_pages(mpnt); - mm->total_vm -= pages; + vx_vmpages_sub(mm, pages); + // mm->total_vm -= pages; vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, -pages); continue; --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -319,8 +327,6 @@ static struct mm_struct * mm_init(struct INIT_LIST_HEAD(&mm->mmlist); mm->core_waiters = 0; mm->nr_ptes = 0; - set_mm_counter(mm, file_rss, 0); - set_mm_counter(mm, anon_rss, 0); spin_lock_init(&mm->page_table_lock); rwlock_init(&mm->ioctx_list_lock); mm->ioctx_list = NULL; --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -329,6 +335,7 @@ static struct mm_struct * mm_init(struct if (likely(!mm_alloc_pgd(mm))) { mm->def_flags = 0; + set_vx_info(&mm->mm_vx_info, current->vx_info); return mm; } free_mm(mm); --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -360,6 +367,7 @@ void fastcall __mmdrop(struct mm_struct BUG_ON(mm == &init_mm); mm_free_pgd(mm); destroy_context(mm); + clr_vx_info(&mm->mm_vx_info); free_mm(mm); } --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 
+++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -479,6 +487,7 @@ static int copy_mm(unsigned long clone_f /* Copy the current MM stuff.. */ memcpy(mm, oldmm, sizeof(*mm)); + mm->mm_vx_info = NULL; if (!mm_init(mm)) goto fail_nomem; --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -507,6 +516,7 @@ fail_nocontext: * If init_new_context() failed, we cannot use mmput() to free the mm * because it calls destroy_context() */ + clr_vx_info(&mm->mm_vx_info); mm_free_pgd(mm); free_mm(mm); return retval; --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -903,7 +917,25 @@ static task_t *copy_process(unsigned lon if (!p) goto fork_out; + init_vx_info(&p->vx_info, current->vx_info); + init_nx_info(&p->nx_info, current->nx_info); + + /* check vserver memory */ + if (p->mm && !(clone_flags & CLONE_VM)) { + if (vx_vmpages_avail(p->mm, p->mm->total_vm)) + vx_pages_add(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm); + else + goto bad_fork_free; + } + if (p->mm && vx_flags(VXF_FORK_RSS, 0)) { + if (!vx_rsspages_avail(p->mm, get_mm_counter(p->mm, file_rss))) + goto bad_fork_cleanup_vm; + } + retval = -EAGAIN; + if (!vx_nproc_avail(1)) + goto bad_fork_cleanup_vm; + if (atomic_read(&p->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 18:28:17 +0100 @@ -908,7 +940,7 @@ ***** p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && p->user != &root_user) - goto bad_fork_free; + goto bad_fork_cleanup_vm; } atomic_inc(&p->user->__count); --- linux-2.6.16-rc1/kernel/fork.c 2006-01-26 22:35:32 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/kernel/fork.c 2006-01-21 
18:28:17 +0100 @@ -1189,6 +1233,9 @@ bad_fork_cleanup_count: put_group_info(p->group_info); atomic_dec(&p->user->processes); free_uid(p->user); +bad_fork_cleanup_vm: + if (p->mm && !(clone_flags & CLONE_VM)) + vx_pages_sub(p->mm->mm_vx_info, RLIMIT_AS, p->mm->total_vm); bad_fork_free: free_task(p); fork_out: --- linux-2.6.16-rc1/mm/fremap.c 2006-01-03 17:30:13 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/fremap.c 2006-01-21 18:28:13 +0100 @@ -35,6 +36,7 @@ static int zap_pte(struct mm_struct *mm, set_page_dirty(page); page_remove_rmap(page); page_cache_release(page); + // dec_mm_counter(mm, file_rss); } } else { if (!pte_file(pte)) --- linux-2.6.16-rc1/mm/fremap.c 2006-01-03 17:30:13 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/fremap.c 2006-01-21 18:28:13 +0100 @@ -74,6 +76,8 @@ int install_page(struct mm_struct *mm, s err = -ENOMEM; if (page_mapcount(page) > INT_MAX/2) goto unlock; + if (!vx_rsspages_avail(mm, 1)) + goto unlock; if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte)) inc_mm_counter(mm, file_rss); --- linux-2.6.16-rc1/mm/memory.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/memory.c 2006-01-21 18:28:13 +0100 @@ -1892,6 +1892,10 @@ static int do_swap_page(struct mm_struct grab_swap_token(); } + if (!vx_rsspages_avail(mm, 1)) { + ret = VM_FAULT_OOM; + goto out; + } mark_page_accessed(page); lock_page(page); --- linux-2.6.16-rc1/mm/memory.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/memory.c 2006-01-21 18:28:13 +0100 @@ -1963,6 +1967,8 @@ static int do_anonymous_page(struct mm_s /* Allocate our own private page. 
*/ pte_unmap(page_table); + if (!vx_rsspages_avail(mm, 1)) + goto oom; if (unlikely(anon_vma_prepare(vma))) goto oom; page = alloc_zeroed_user_highpage(vma, address); --- linux-2.6.16-rc1/mm/memory.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/memory.c 2006-01-21 18:28:13 +0100 @@ -2041,6 +2047,9 @@ static int do_no_page(struct mm_struct * smp_rmb(); /* serializes i_size against truncate_count */ } retry: + /* FIXME: is that check useful here? */ + if (!vx_rsspages_avail(mm, 1)) + return VM_FAULT_OOM; new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret); /* * No smp_rmb is needed here as long as there's a full --- linux-2.6.16-rc1/mm/memory.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/memory.c 2006-01-21 18:28:13 +0100 @@ -2188,6 +2197,7 @@ static inline int handle_pte_fault(struc pte_t entry; pte_t old_entry; spinlock_t *ptl; + int ret, type = VXPT_UNKNOWN; old_entry = entry = *pte; if (!pte_present(entry)) { --- linux-2.6.16-rc1/mm/memory.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/memory.c 2006-01-21 18:28:13 +0100 @@ -2192,17 +2202,27 @@ ***** old_entry = entry = *pte; if (!pte_present(entry)) { if (pte_none(entry)) { - if (!vma->vm_ops || !vma->vm_ops->nopage) - return do_anonymous_page(mm, vma, address, + if (!vma->vm_ops || !vma->vm_ops->nopage) { + ret = do_anonymous_page(mm, vma, address, pte, pmd, write_access); - return do_no_page(mm, vma, address, + type = VXPT_ANON; + goto out; + } + ret = do_no_page(mm, vma, address, pte, pmd, write_access); + type = VXPT_NONE; + goto out; } - if (pte_file(entry)) - return do_file_page(mm, vma, address, + if (pte_file(entry)) { + ret = do_file_page(mm, vma, address, pte, pmd, write_access, entry); - return do_swap_page(mm, vma, address, + type = VXPT_FILE; + goto out; + } + ret = do_swap_page(mm, vma, address, pte, pmd, write_access, entry); + type = VXPT_SWAP; + goto out; } ptl = pte_lockptr(mm, pmd); --- linux-2.6.16-rc1/mm/memory.c 2006-01-26 
22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/memory.c 2006-01-21 18:28:13 +0100 @@ -2210,9 +2230,12 @@ static inline int handle_pte_fault(struc if (unlikely(!pte_same(*pte, entry))) goto unlock; if (write_access) { - if (!pte_write(entry)) - return do_wp_page(mm, vma, address, + if (!pte_write(entry)) { + ret = do_wp_page(mm, vma, address, pte, pmd, ptl, entry); + type = VXPT_WRITE; + goto out; + } entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); --- linux-2.6.16-rc1/mm/memory.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/memory.c 2006-01-21 18:28:13 +0100 @@ -2232,7 +2255,10 @@ static inline int handle_pte_fault(struc } unlock: pte_unmap_unlock(pte, ptl); - return VM_FAULT_MINOR; + ret = VM_FAULT_MINOR; +out: + vx_page_fault(mm, vma, type, ret); + return ret; } /* --- linux-2.6.16-rc1/mm/mlock.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mlock.c 2006-01-21 18:28:13 +0100 @@ -65,7 +66,7 @@ success: ret = make_pages_present(start, end); } - vma->vm_mm->locked_vm -= pages; + vx_vmlocked_sub(vma->vm_mm, pages); out: if (ret == -ENOMEM) ret = -EAGAIN; --- linux-2.6.16-rc1/mm/mlock.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mlock.c 2006-01-21 18:28:13 +0100 @@ -123,7 +124,7 @@ static int do_mlock(unsigned long start, asmlinkage long sys_mlock(unsigned long start, size_t len) { - unsigned long locked; + unsigned long locked, grow; unsigned long lock_limit; int error = -ENOMEM; --- linux-2.6.16-rc1/mm/mlock.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mlock.c 2006-01-21 18:28:13 +0100 @@ -134,8 +135,10 @@ asmlinkage long sys_mlock(unsigned long len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; - locked = len >> PAGE_SHIFT; - locked += current->mm->locked_vm; + grow = len >> PAGE_SHIFT; + if (!vx_vmlocked_avail(current->mm, grow)) + goto out; + locked = current->mm->locked_vm + grow; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= 
PAGE_SHIFT; --- linux-2.6.16-rc1/mm/mlock.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mlock.c 2006-01-21 18:28:13 +0100 @@ -143,6 +146,7 @@ asmlinkage long sys_mlock(unsigned long /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); +out: up_write(&current->mm->mmap_sem); return error; } --- linux-2.6.16-rc1/mm/mlock.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mlock.c 2006-01-21 18:28:13 +0100 @@ -202,6 +206,8 @@ asmlinkage long sys_mlockall(int flags) lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; + if (!vx_vmlocked_avail(current->mm, current->mm->total_vm)) + goto out; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1116,10 +1116,10 @@ munmap_back: kmem_cache_free(vm_area_cachep, vma); } out: - mm->total_vm += len >> PAGE_SHIFT; + vx_vmpages_add(mm, len >> PAGE_SHIFT); vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { - mm->locked_vm += len >> PAGE_SHIFT; + vx_vmlocked_add(mm, len >> PAGE_SHIFT); make_pages_present(addr, addr + len); } if (flags & MAP_POPULATE) { --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1479,9 +1479,9 @@ static int acct_stack_growth(struct vm_a return -ENOMEM; /* Ok, everything looks good - let it rip */ - mm->total_vm += grow; + vx_vmpages_add(mm, grow); if (vma->vm_flags & VM_LOCKED) - mm->locked_vm += grow; + vx_vmlocked_add(mm, grow); vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); return 0; } --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1634,9 +1634,9 @@ static void remove_vma_list(struct mm_st do { long nrpages = vma_pages(vma); - 
mm->total_vm -= nrpages; + vx_vmpages_sub(mm, nrpages); if (vma->vm_flags & VM_LOCKED) - mm->locked_vm -= nrpages; + vx_vmlocked_sub(mm, nrpages); vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); } while (vma); --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1865,6 +1865,8 @@ unsigned long do_brk(unsigned long addr, lock_limit >>= PAGE_SHIFT; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; + if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT)) + return -ENOMEM; } /* --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1891,7 +1893,8 @@ unsigned long do_brk(unsigned long addr, if (mm->map_count > sysctl_max_map_count) return -ENOMEM; - if (security_vm_enough_memory(len >> PAGE_SHIFT)) + if (security_vm_enough_memory(len >> PAGE_SHIFT) || + !vx_vmpages_avail(mm, len >> PAGE_SHIFT)) return -ENOMEM; flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1919,9 +1922,9 @@ unsigned long do_brk(unsigned long addr, vma->vm_page_prot = protection_map[flags & 0x0f]; vma_link(mm, vma, prev, rb_link, rb_parent); out: - mm->total_vm += len >> PAGE_SHIFT; + vx_vmpages_add(mm, len >> PAGE_SHIFT); if (flags & VM_LOCKED) { - mm->locked_vm += len >> PAGE_SHIFT; + vx_vmlocked_add(mm, len >> PAGE_SHIFT); make_pages_present(addr, addr + len); } return addr; --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1947,6 +1950,11 @@ void exit_mmap(struct mm_struct *mm) free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); tlb_finish_mmu(tlb, 0, end); + set_mm_counter(mm, file_rss, 0); + set_mm_counter(mm, anon_rss, 0); + vx_vmpages_sub(mm, mm->total_vm); + vx_vmlocked_sub(mm, 
mm->locked_vm); + /* * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -1986,7 +1994,8 @@ int insert_vm_struct(struct mm_struct * if (__vma && __vma->vm_start < vma->vm_end) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && - security_vm_enough_memory(vma_pages(vma))) + (security_vm_enough_memory(vma_pages(vma)) || + !vx_vmpages_avail(mm, vma_pages(vma)))) return -ENOMEM; vma_link(mm, vma, prev, rb_link, rb_parent); return 0; --- linux-2.6.16-rc1/mm/mmap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mmap.c 2006-01-21 18:28:13 +0100 @@ -2059,5 +2068,7 @@ int may_expand_vm(struct mm_struct *mm, if (cur + npages > lim) return 0; + if (!vx_vmpages_avail(mm, npages)) + return 0; return 1; } --- linux-2.6.16-rc1/mm/mremap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mremap.c 2006-01-21 18:28:13 +0100 @@ -211,7 +212,7 @@ static unsigned long move_vma(struct vm_ * If this were a serious issue, we'd add a flag to do_munmap(). 
*/ hiwater_vm = mm->hiwater_vm; - mm->total_vm += new_len >> PAGE_SHIFT; + vx_vmpages_add(mm, new_len >> PAGE_SHIFT); vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); if (do_munmap(mm, old_addr, old_len) < 0) { --- linux-2.6.16-rc1/mm/mremap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mremap.c 2006-01-21 18:28:13 +0100 @@ -229,7 +230,7 @@ static unsigned long move_vma(struct vm_ } if (vm_flags & VM_LOCKED) { - mm->locked_vm += new_len >> PAGE_SHIFT; + vx_vmlocked_add(mm, new_len >> PAGE_SHIFT); if (new_len > old_len) make_pages_present(new_addr + old_len, new_addr + new_len); --- linux-2.6.16-rc1/mm/mremap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mremap.c 2006-01-21 18:28:13 +0100 @@ -336,6 +337,9 @@ unsigned long do_mremap(unsigned long ad ret = -EAGAIN; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) goto out; + if (!vx_vmlocked_avail(current->mm, + (new_len - old_len) >> PAGE_SHIFT)) + goto out; } if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) { ret = -ENOMEM; --- linux-2.6.16-rc1/mm/mremap.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/mremap.c 2006-01-21 18:28:13 +0100 @@ -364,10 +368,10 @@ unsigned long do_mremap(unsigned long ad vma_adjust(vma, vma->vm_start, addr + new_len, vma->vm_pgoff, NULL); - mm->total_vm += pages; + vx_vmpages_add(mm, pages); vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); if (vma->vm_flags & VM_LOCKED) { - mm->locked_vm += pages; + vx_vmlocked_add(mm, pages); make_pages_present(addr + old_len, addr + new_len); } --- linux-2.6.16-rc1/mm/nommu.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/nommu.c 2006-01-21 18:28:13 +0100 @@ -812,7 +812,7 @@ unsigned long do_mmap_pgoff(struct file realalloc += kobjsize(vma); askedalloc += sizeof(*vma); - current->mm->total_vm += len >> PAGE_SHIFT; + vx_vmpages_add(current->mm, len >> PAGE_SHIFT); add_nommu_vma(vma); --- linux-2.6.16-rc1/mm/nommu.c 2006-01-26 22:35:33 +0100 +++ 
linux-2.6.16-rc1-vs2.1.0.9.4/mm/nommu.c 2006-01-21 18:28:13 +0100 @@ -929,7 +929,7 @@ int do_munmap(struct mm_struct *mm, unsi kfree(vml); update_hiwater_vm(mm); - mm->total_vm -= len >> PAGE_SHIFT; + vx_vmpages_sub(mm, len >> PAGE_SHIFT); #ifdef DEBUG show_process_blocks(); --- linux-2.6.16-rc1/mm/nommu.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/nommu.c 2006-01-21 18:28:13 +0100 @@ -948,7 +948,7 @@ void exit_mmap(struct mm_struct * mm) printk("Exit_mmap:\n"); #endif - mm->total_vm = 0; + vx_vmpages_sub(mm, mm->total_vm); while ((tmp = mm->context.vmlist)) { mm->context.vmlist = tmp->next; --- linux-2.6.16-rc1/mm/oom_kill.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/oom_kill.c 2006-01-21 18:28:14 +0100 @@ -55,6 +55,7 @@ unsigned long badness(struct task_struct * The memory size of the process is the basis for the badness. */ points = p->mm->total_vm; + /* FIXME: add vserver badness ;) */ /* * Processes which fork a lot of child processes are likely --- linux-2.6.16-rc1/mm/slab.c 2006-01-26 22:35:33 +0100 +++ linux-2.6.16-rc1-vs2.1.0.9.4/mm/slab.c 2006-01-21 18:28:15 +0100 @@ -2644,6 +2646,7 @@ static void *__cache_alloc_node(kmem_cac #endif slabp->free = next; check_slabp(cachep, slabp); + vx_slab_alloc(cachep, flags); l3->free_objects--; /* move slabp to correct slabp list: */ list_del(&slabp->list);