/*
 *  linux/kernel/vserver/legacy.c
 *
 *  Virtual Server: Legacy Functions
 *
 *  Copyright (C) 2001-2003  Jacques Gelinas
 *  Copyright (C) 2003-2004  Herbert Pötzl
 *
 *  V0.01  broken out from vcontext.c V0.05
 *
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vserver/legacy.h>

#include <asm/errno.h>
#include <asm/uaccess.h>


static int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
{
        struct user_struct *new_user, *old_user;

        if (!p || !vxi)
                BUG();
        new_user = alloc_uid(vxi->vx_id, p->uid);
        if (!new_user)
                return -ENOMEM;

        old_user = p->user;
        if (new_user != old_user) {
                atomic_inc(&new_user->processes);
                atomic_dec(&old_user->processes);
                p->user = new_user;
        }
        free_uid(old_user);
        return 0;
}

/*
 *      migrate task to new context
 *      gets vxi, puts old_vxi on change
 */

static int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
{
        struct vx_info *old_vxi = task_get_vx_info(p);
        int ret = 0;

        if (!p || !vxi)
                BUG();

        vxdprintk("vx_migrate_task(%p,%p[#%d.%d])\n", p, vxi,
                vxi->vx_id, atomic_read(&vxi->vx_refcount));

        spin_lock(&p->alloc_lock);
        if (old_vxi == vxi)
                goto out;

        if (!(ret = vx_migrate_user(p, vxi))) {
                if (old_vxi) {
                        old_vxi->virt.nr_threads--;
                        atomic_dec(&old_vxi->limit.res[RLIMIT_NPROC]);
                }
                vxi->virt.nr_threads++;
                atomic_inc(&vxi->limit.res[RLIMIT_NPROC]);
                p->vx_info = get_vx_info(vxi);
                p->xid = vxi->vx_id;
                if (old_vxi)
                        put_vx_info(old_vxi);
        }
out:
        spin_unlock(&p->alloc_lock);
        put_vx_info(old_vxi);
        return ret;
}

static int vx_set_initpid(struct vx_info *vxi, int pid)
{
        int ret = 0;

        if (vxi->vx_initpid)
                ret = -EPERM;
        else
                vxi->vx_initpid = pid;
        return ret;
}

int vc_new_s_context(uint32_t ctx, void *data)
{
        int ret = -ENOMEM;
        struct vcmd_new_s_context_v1 vc_data;
        struct vx_info *new_vxi;

        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        /* legacy hack, will be removed soon */
        if (ctx == -2) {
                /* assign flags and initpid */
                if (!current->vx_info)
                        return -EINVAL;
                ret = 0;
                if (vc_data.flags & VX_INFO_INIT)
                        ret = vx_set_initpid(current->vx_info, current->tgid);
                if (ret == 0) {
                        /* We keep the same vx_id, but lower the capabilities */
                        current->cap_bset &= (~vc_data.remove_cap);
                        ret = vx_current_xid();
                        current->vx_info->vx_flags |= vc_data.flags;
                }
                return ret;
        }

        if (!vx_check(0, VX_ADMIN) || !capable(CAP_SYS_ADMIN) ||
                (current->vx_info &&
                (current->vx_info->vx_flags & VX_INFO_LOCK)))
                return -EPERM;

        if (((ctx > MAX_S_CONTEXT) && (ctx != VX_DYNAMIC_ID)) ||
                (ctx == 0))
                return -EINVAL;

        if ((ctx == VX_DYNAMIC_ID) || (ctx < MIN_D_CONTEXT))
                new_vxi = find_or_create_vx_info(ctx);
        else
                new_vxi = find_vx_info(ctx);
        if (!new_vxi)
                return -EINVAL;

        ret = vx_migrate_task(current, new_vxi);
        if (ret == 0) {
                current->cap_bset &= (~vc_data.remove_cap);
                new_vxi->vx_flags |= vc_data.flags;
                if (vc_data.flags & VX_INFO_INIT)
                        vx_set_initpid(new_vxi, current->tgid);
                if (vc_data.flags & VX_INFO_NPROC)
                        new_vxi->limit.rlim[RLIMIT_NPROC] =
                                current->rlim[RLIMIT_NPROC].rlim_max;
                ret = new_vxi->vx_id;
        }
        put_vx_info(new_vxi);
        return ret;
}

/* set ipv4 root (syscall) */

int vc_set_ipv4root(uint32_t nbip, void *data)
{
        int i, j, err = -EPERM;
        struct vcmd_set_ipv4root_v3 vc_data;
        struct ip_info *new_ipi, *ipi = current->ip_info;

        if (nbip < 0 || nbip > NB_IPV4ROOT)
                return -EINVAL;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        if (!ipi || ipi->ipv4[0] == 0 || capable(CAP_NET_ADMIN))
                // We are allowed to change everything
                err = 0;
        else if (ipi) {
                int found = 0;

                // We are allowed to select a subset of the currently
                // installed IP numbers.  No new ones are allowed.
                // We can't change the broadcast address though.
                for (i = 0; i < nbip; i++) {
                        __u32 ipip = vc_data.ip_mask_pair[i].ip;

                        for (j = 0; j < ipi->nbipv4; j++) {
                                if (ipip == ipi->ipv4[j]) {
                                        found++;
                                        break;
                                }
                        }
                }
                if ((found == nbip) &&
                        (vc_data.broadcast == ipi->v4_bcast))
                        err = 0;
        }
        if (err)
                return err;

        new_ipi = create_ip_info();
        if (!new_ipi)
                return -EINVAL;

        new_ipi->nbipv4 = nbip;
        for (i = 0; i < nbip; i++) {
                new_ipi->ipv4[i] = vc_data.ip_mask_pair[i].ip;
                new_ipi->mask[i] = vc_data.ip_mask_pair[i].mask;
        }
        new_ipi->v4_bcast = vc_data.broadcast;

        current->ip_info = new_ipi;
        put_ip_info(ipi);
        return 0;
}
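
/*
 * Usage sketch (userspace side): both legacy commands above are reached
 * through the vserver syscall, roughly the way util-vserver's old
 * chcontext tool did it.  This is only an illustration, assuming the
 * usual dispatch where sys_vserver(cmd, id, data) routes the legacy
 * command number to vc_new_s_context(); VCMD_new_s_context and
 * __NR_vserver come from the patch/arch headers and may differ between
 * kernel versions.
 *
 *      struct vcmd_new_s_context_v1 vc = {
 *              .flags      = VX_INFO_INIT,             // become fake init
 *              .remove_cap = (1 << CAP_SYS_ADMIN),     // drop from cap_bset
 *      };
 *      // VX_DYNAMIC_ID asks the kernel to pick a free context id;
 *      // on success the new xid is returned
 *      int xid = syscall(__NR_vserver, VCMD_new_s_context,
 *                        VX_DYNAMIC_ID, &vc);
 */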