#ifndef _VX_INLINE_H
#define _VX_INLINE_H

// #define VX_DEBUG

/*
 * include targets assumed: they must provide printk(), task_struct
 * and task locking, plus struct vx_info and struct ip_info
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vserver/context.h>
#include <linux/vserver/network.h>

#if defined(VX_DEBUG)
#define vxdprintk(x...) printk("vxd: " x)
#else
#define vxdprintk(x...)
#endif

void free_vx_info(struct vx_info *);

extern int proc_pid_vinfo(struct task_struct *, char *);

#define get_vx_info(i)	__get_vx_info(i,__FILE__,__LINE__)

static __inline__ struct vx_info *__get_vx_info(struct vx_info *vxi,
	const char *_file, int _line)
{
	/* for now we allow vxi to be null */
	if (!vxi)
		return NULL;
	vxdprintk("get_vx_info(%p[#%d.%d])\t%s:%d\n",
		vxi, vxi->vx_id, atomic_read(&vxi->vx_refcount),
		_file, _line);
	atomic_inc(&vxi->vx_refcount);
	return vxi;
}

#define put_vx_info(i)	__put_vx_info(i,__FILE__,__LINE__)

static __inline__ void __put_vx_info(struct vx_info *vxi,
	const char *_file, int _line)
{
	/* for now we allow vxi to be null */
	if (!vxi)
		return;
	vxdprintk("put_vx_info(%p[#%d.%d])\t%s:%d\n",
		vxi, vxi->vx_id, atomic_read(&vxi->vx_refcount),
		_file, _line);
	if (atomic_dec_and_lock(&vxi->vx_refcount, &vxlist_lock)) {
		list_del(&vxi->vx_list);
		spin_unlock(&vxlist_lock);
		free_vx_info(vxi);
	}
}

#define task_get_vx_info(i)	__task_get_vx_info(i,__FILE__,__LINE__)

static __inline__ struct vx_info *__task_get_vx_info(struct task_struct *p,
	const char *_file, int _line)
{
	struct vx_info *vxi;

	task_lock(p);
	vxi = __get_vx_info(p->vx_info, _file, _line);
	task_unlock(p);
	return vxi;
}

#define vx_verify_info(p,i)	\
	__vx_verify_info((p)->vx_info,i,__FILE__,__LINE__)

static __inline__ void __vx_verify_info(
	struct vx_info *vxa, struct vx_info *vxb,
	const char *_file, int _line)
{
	if (vxa == vxb)
		return;
	printk(KERN_ERR "vx bad assumption (%p==%p) at %s:%d\n",
		vxa, vxb, _file, _line);
}

#define vx_task_xid(t)		((t)->xid)

#define vx_current_xid()	vx_task_xid(current)

#define vx_check(c,m)		__vx_check(vx_current_xid(),c,m)

#define vx_weak_check(c,m)	((m) ? vx_check(c,m) : 1)
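
/*
 * Usage sketch (hypothetical call site): each get is expected to be
 * paired with a put so vx_refcount stays balanced; both helpers
 * tolerate a NULL vx_info, so no extra check is needed before the put.
 *
 *	struct vx_info *vxi = task_get_vx_info(p);
 *
 *	if (vxi)
 *		use_xid(vxi->vx_id);	// use_xid() is illustrative only
 *	put_vx_info(vxi);
 */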

/*
 * check current context for ADMIN/WATCH and
 * optionally against supplied argument
 */
static __inline__ int __vx_check(xid_t cid, xid_t id, unsigned int mode)
{
	if (mode & VX_ARG_MASK) {
		if ((mode & VX_IDENT) && (id == cid))
			return 1;
	}
	if (mode & VX_ATR_MASK) {
		if ((mode & VX_DYNAMIC) &&
			(id >= MIN_D_CONTEXT) && (id <= MAX_S_CONTEXT))
			return 1;
		if ((mode & VX_STATIC) &&
			(id > 1) && (id < MIN_D_CONTEXT))
			return 1;
	}
	return (((mode & VX_ADMIN) && (cid == 0)) ||
		((mode & VX_WATCH) && (cid == 1)));
}

void free_ip_info(struct ip_info *);

#define get_ip_info(i)	__get_ip_info(i,__FILE__,__LINE__)

static __inline__ struct ip_info *__get_ip_info(struct ip_info *ipi,
	const char *_file, int _line)
{
	/* for now we allow ipi to be null */
	if (!ipi)
		return NULL;
	vxdprintk("get_ip_info(%p[%d])\t%s:%d\n",
		ipi, atomic_read(&ipi->ip_refcount),
		_file, _line);
	atomic_inc(&ipi->ip_refcount);
	return ipi;
}

#define put_ip_info(i)	__put_ip_info(i,__FILE__,__LINE__)

static __inline__ void __put_ip_info(struct ip_info *ipi,
	const char *_file, int _line)
{
	/* for now we allow ipi to be null */
	if (!ipi)
		return;
	vxdprintk("put_ip_info(%p[%d])\t%s:%d\n",
		ipi, atomic_read(&ipi->ip_refcount),
		_file, _line);
	if (atomic_dec_and_lock(&ipi->ip_refcount, &iplist_lock)) {
		list_del(&ipi->ip_list);
		spin_unlock(&iplist_lock);
		free_ip_info(ipi);
	}
}

#define task_get_ip_info(i)	__task_get_ip_info(i,__FILE__,__LINE__)

static __inline__ struct ip_info *__task_get_ip_info(struct task_struct *p,
	const char *_file, int _line)
{
	struct ip_info *ipi;

	task_lock(p);
	ipi = __get_ip_info(p->ip_info, _file, _line);
	task_unlock(p);
	return ipi;
}

#define ip_verify_info(p,i)	\
	__ip_verify_info((p)->ip_info,i,__FILE__,__LINE__)

static __inline__ void __ip_verify_info(
	struct ip_info *ipa, struct ip_info *ipb,
	const char *_file, int _line)
{
	if (ipa == ipb)
		return;
	printk(KERN_ERR "ip bad assumption (%p==%p) at %s:%d\n",
		ipa, ipb, _file, _line);
}

#define VX_DEBUG_ACC_RSS	0
#define VX_DEBUG_ACC_VM		0
#define VX_DEBUG_ACC_VML	0

#define vx_acc_page(m, d, v, r) \
	__vx_acc_page(&(m->v), m->mm_vx_info, r, d, __FILE__, __LINE__)

static inline void __vx_acc_page(unsigned long *v, struct vx_info *vxi,
	int res, int dir, char *file, int line)
{
	if (v) {
		if (dir > 0)
			++(*v);
		else
			--(*v);
	}
	if (vxi) {
		if (dir > 0)
			atomic_inc(&vxi->limit.res[res]);
		else
			atomic_dec(&vxi->limit.res[res]);
	}
}

#define vx_acc_pages(m, p, v, r) \
	__vx_acc_pages(&(m->v), m->mm_vx_info, r, p, __FILE__, __LINE__)

static inline void __vx_acc_pages(unsigned long *v, struct vx_info *vxi,
	int res, int pages, char *file, int line)
{
	if ((pages > 1 || pages < -1) &&
		((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
		(res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
		(res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML)))
		vxdprintk("vx_acc_pages [%5d,%2d]: %5d += %5d in %s:%d\n",
			(vxi ? vxi->vx_id : -1), res,
			(vxi ? atomic_read(&vxi->limit.res[res]) : 0),
			pages, file, line);
	if (pages == 0)
		return;
	if (v)
		*v += pages;
	if (vxi)
		atomic_add(pages, &vxi->limit.res[res]);
}

#define vx_acc_vmpage(m,d)	vx_acc_page(m, d, total_vm, RLIMIT_AS)
#define vx_acc_vmlpage(m,d)	vx_acc_page(m, d, locked_vm, RLIMIT_MEMLOCK)
#define vx_acc_rsspage(m,d)	vx_acc_page(m, d, rss, RLIMIT_RSS)

#define vx_acc_vmpages(m,p)	vx_acc_pages(m, p, total_vm, RLIMIT_AS)
#define vx_acc_vmlpages(m,p)	vx_acc_pages(m, p, locked_vm, RLIMIT_MEMLOCK)
#define vx_acc_rsspages(m,p)	vx_acc_pages(m, p, rss, RLIMIT_RSS)

#define vx_pages_add(s,r,p)	__vx_acc_pages(0, s, r, p, __FILE__, __LINE__)
#define vx_pages_sub(s,r,p)	vx_pages_add(s, r, -(p))
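
/*
 * Usage sketch (hypothetical call site): the per-mm helpers above update
 * both the mm_struct counter and the owning context's limit counter in
 * one step, so vx_acc_vmpages(mm, grow) stands in for a plain
 * "mm->total_vm += grow;", and the single-page variants take +1/-1.
 *
 *	vx_acc_vmpages(mm, grow);	// mm->total_vm += grow, plus RLIMIT_AS counter
 *	vx_acc_rsspage(mm, 1);		// mm->rss++, plus RLIMIT_RSS counter
 */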

#define vx_vmpages_inc(m)	vx_acc_vmpage(m, 1)
#define vx_vmpages_dec(m)	vx_acc_vmpage(m,-1)
#define vx_vmpages_add(m,p)	vx_acc_vmpages(m, p)
#define vx_vmpages_sub(m,p)	vx_acc_vmpages(m,-(p))

#define vx_vmlocked_inc(m)	vx_acc_vmlpage(m, 1)
#define vx_vmlocked_dec(m)	vx_acc_vmlpage(m,-1)
#define vx_vmlocked_add(m,p)	vx_acc_vmlpages(m, p)
#define vx_vmlocked_sub(m,p)	vx_acc_vmlpages(m,-(p))

#define vx_rsspages_inc(m)	vx_acc_rsspage(m, 1)
#define vx_rsspages_dec(m)	vx_acc_rsspage(m,-1)
#define vx_rsspages_add(m,p)	vx_acc_rsspages(m, p)
#define vx_rsspages_sub(m,p)	vx_acc_rsspages(m,-(p))

#define vx_pages_avail(m, p, r) \
	__vx_pages_avail((m)->mm_vx_info, (r), (p), __FILE__, __LINE__)

static inline int __vx_pages_avail(struct vx_info *vxi,
	int res, int pages, char *file, int line)
{
	if ((res == RLIMIT_RSS && VX_DEBUG_ACC_RSS) ||
		(res == RLIMIT_AS && VX_DEBUG_ACC_VM) ||
		(res == RLIMIT_MEMLOCK && VX_DEBUG_ACC_VML))
		printk("vx_pages_avail[%5d,%2d]: %5ld > %5d + %5d in %s:%d\n",
			(vxi ? vxi->vx_id : -1), res,
			(vxi ? vxi->limit.rlim[res] : 1),
			(vxi ? atomic_read(&vxi->limit.res[res]) : 0),
			pages, file, line);
	if (!vxi)
		return 1;
	if (vxi->limit.rlim[res] == RLIM_INFINITY)
		return 1;
	if (vxi->limit.rlim[res] <
		atomic_read(&vxi->limit.res[res]) + pages)
		return 0;
	return 1;
}

#define vx_vmpages_avail(m,p)	vx_pages_avail(m, p, RLIMIT_AS)
#define vx_vmlocked_avail(m,p)	vx_pages_avail(m, p, RLIMIT_MEMLOCK)
#define vx_rsspages_avail(m,p)	vx_pages_avail(m, p, RLIMIT_RSS)

/* procfs ioctls */

#define FIOC_GETXFLG	_IOR('x', 5, long)
#define FIOC_SETXFLG	_IOW('x', 6, long)

#endif