kernel/exit.c
=========================
static inline void forget_original_parent(struct task_struct *father)
{
        struct task_struct *p;
        struct task_struct *vchild_reaper = child_reaper;

        read_lock(&tasklist_lock);
        read_lock(&ctx_ref_lock);
        if (father->s_info) {
                pid_t initpid = father->s_info->initpid;

                if ((initpid != 0) && (father->pid != initpid)) {
                        struct task_struct *r = find_task_by_pid(initpid);

                        if (r != NULL)
                                vchild_reaper = r;
                }
        }
        read_unlock(&ctx_ref_lock);

kernel/sched.c
=========================
        if (p->policy == SCHED_OTHER) {
                /*
                 * Give the process a first-approximation goodness value
                 * according to the number of clock-ticks it has left.
                 *
                 * Don't do any other calculations if the time slice is
                 * over..
                 */
                read_lock(&ctx_ref_lock);
                if (p->s_info && (p->s_info->flags & VX_INFO_SCHED)) {
                        weight = atomic_read(&p->s_info->ticks) /
                                 atomic_read(&p->s_info->refcount);
                        weight = (weight + p->counter) >> 1;
                } else
                        weight = p->counter;
                read_unlock(&ctx_ref_lock);
                if (!weight)
                        goto out;

...

        /* Do we need to re-calculate counters? */
        if (unlikely(!c)) {
                struct task_struct *p;

                spin_unlock_irq(&runqueue_lock);
                read_lock(&tasklist_lock);
                /*
                 * Reset each s_info->ticks to the sum of the p->counter
                 * values of all member processes.
                 */
                read_lock(&ctx_ref_lock);
                for_each_task(p) {
                        if (p->s_info && (p->s_info->flags & VX_INFO_SCHED))
                                atomic_set(&p->s_info->ticks, 0);
                }
                for_each_task(p) {
                        p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
                        if (p->s_info && (p->s_info->flags & VX_INFO_SCHED))
                                atomic_add(p->counter, &p->s_info->ticks);
                }
                read_unlock(&ctx_ref_lock);
                read_unlock(&tasklist_lock);
                spin_lock_irq(&runqueue_lock);
                goto repeat_schedule;
        }

kernel/vcontext.c
=========================
void vx_assign_info(struct task_struct *p)
{
        read_lock(&ctx_ref_lock);
        if (p->s_info)
                atomic_inc(&p->s_info->refcount);
        read_unlock(&ctx_ref_lock);
}

void vx_release_info(struct task_struct *p)
{
        write_lock(&ctx_ref_lock);
        if (p->s_info) {
                if (atomic_dec_and_test(&p->s_info->refcount)) {
                        struct context_info *s_info = p->s_info;

                        p->s_info = NULL;
                        kfree(s_info);
                }
        }
        write_unlock(&ctx_ref_lock);
}
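All of these hunks reach the same per-context state through p->s_info (and, in fs/proc/array.c below, p->ip_info). Neither structure's declaration is part of this excerpt, so the sketch below is reconstructed from the fields the code touches; the names VX_IDS_MAX and NB_IPV4ROOT and the numeric value of VX_INFO_SCHED are hypothetical placeholders. Note how the scheduler combines two of these fields: with VX_INFO_SCHED set, a member task's first-approximation goodness is ((ticks / refcount) + p->counter) >> 1, so the more runnable tasks a context has, the less weight each one gets.

#define VX_IDS_MAX      16              /* hypothetical size of vx_id[] */
#define NB_IPV4ROOT     16              /* hypothetical size of ipv4[]/mask[] */
#define VX_INFO_SCHED   1               /* hypothetical flag value */

struct context_info {
        atomic_t refcount;              /* member tasks holding a reference */
        atomic_t ticks;                 /* pooled slices, refilled in schedule() */
        int flags;                      /* VX_INFO_SCHED enables context scheduling */
        pid_t initpid;                  /* per-context init, used as child reaper */
        int vx_id[VX_IDS_MAX];          /* member context ids, 0-terminated */
};

struct ip_info {
        atomic_t refcount;
        int nbipv4;                     /* addresses actually bound */
        __u32 ipv4[NB_IPV4ROOT];        /* IPv4 addresses, printed as %08x */
        __u32 mask[NB_IPV4ROOT];        /* matching netmasks */
        __u32 v4_bcast;                 /* broadcast address */
};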
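The two helpers in kernel/vcontext.c are the ends of a reference-counting lifecycle. A minimal sketch of the assign side, assuming the patch hooks context inheritance somewhere on the fork path (the helper vx_inherit_info() and its call site are illustrative, not part of the excerpt):

static inline void vx_inherit_info(struct task_struct *child,
                                   struct task_struct *parent)
{
        /* Illustrative only: share the parent's context and take a
         * reference; vx_release_info() on exit undoes exactly this. */
        child->s_info = parent->s_info;
        vx_assign_info(child);
}

Since vx_release_info() frees the context_info only after taking write_lock(&ctx_ref_lock), every path that dereferences p->s_info under read_lock(&ctx_ref_lock), as forget_original_parent(), the goodness calculation, and proc_pid_status() below all do, cannot race with the kfree().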
(buffer,"ipv4root_refcnt: %d\n" ,atomic_read(&task->ip_info->refcount)); } else { buffer += sprintf (buffer,"ipv4root: 0\n"); buffer += sprintf (buffer,"ipv4root_bcast: 0\n"); } read_unlock(&ctx_ref_lock);