diff --git a/include/linux/sched.h b/include/linux/sched.h
index f2f842d..025a594 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1191,6 +1191,12 @@ struct sched_entity {
 	u64			nr_wakeups_affine_attempts;
 	u64			nr_wakeups_passive;
 	u64			nr_wakeups_idle;
+#ifdef CONFIG_CFS_HARD_LIMITS
+	u64			throttle_start;
+	u64			throttle_max;
+	u64			throttle_count;
+	u64			throttle_sum;
+#endif
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 1b67698..c833aa2 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -80,6 +80,11 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
 	PN(se->wait_max);
 	PN(se->wait_sum);
 	P(se->wait_count);
+#ifdef CONFIG_CFS_HARD_LIMITS
+	PN(se->throttle_max);
+	PN(se->throttle_sum);
+	P(se->throttle_count);
+#endif
 #endif
 	P(se->load.weight);
 #undef PN
@@ -214,6 +219,16 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #ifdef CONFIG_SMP
 	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
 #endif
+#ifdef CONFIG_CFS_HARD_LIMITS
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	SEQ_printf(m, "  .%-30s: %d\n", "rq_bandwidth.throttled",
+			cfs_rq->rq_bandwidth.throttled);
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "rq_bandwidth.time",
+			SPLIT_NS(cfs_rq->rq_bandwidth.time));
+	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "rq_bandwidth.runtime",
+			SPLIT_NS(cfs_rq->rq_bandwidth.runtime));
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+#endif /* CONFIG_CFS_HARD_LIMITS */
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 #endif
 }
@@ -320,7 +335,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f791332..16ed209 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -219,6 +219,23 @@ static inline void start_cfs_bandwidth(struct cfs_rq *cfs_rq)
 
 #ifdef CONFIG_CFS_HARD_LIMITS
 
+static inline void update_stats_throttle_start(struct cfs_rq *cfs_rq,
+			struct sched_entity *se)
+{
+	schedstat_set(se->throttle_start, rq_of(cfs_rq)->clock);
+}
+
+static inline void update_stats_throttle_end(struct cfs_rq *cfs_rq,
+			struct sched_entity *se)
+{
+	schedstat_set(se->throttle_max, max(se->throttle_max,
+			rq_of(cfs_rq)->clock - se->throttle_start));
+	schedstat_set(se->throttle_count, se->throttle_count + 1);
+	schedstat_set(se->throttle_sum, se->throttle_sum +
+			rq_of(cfs_rq)->clock - se->throttle_start);
+	schedstat_set(se->throttle_start, 0);
+}
+
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
 	return cfs_rq->rq_bandwidth.throttled;
@@ -245,6 +262,7 @@ static void sched_cfs_runtime_exceeded(struct sched_entity *se,
 
 	if (cfs_rq->rq_bandwidth.time > cfs_rq->rq_bandwidth.runtime) {
 		cfs_rq->rq_bandwidth.throttled = 1;
+		update_stats_throttle_start(cfs_rq, se);
 		resched_task(tsk_curr);
 	}
 }
@@ -300,6 +318,8 @@ static int do_sched_cfs_period_timer(struct sched_bandwidth *cfs_b, int overrun)
 		if (cfs_rq_throttled(cfs_rq) &&
 			cfs_rq->rq_bandwidth.time < runtime) {
 			cfs_rq->rq_bandwidth.throttled = 0;
+			update_rq_clock(rq);
+			update_stats_throttle_end(cfs_rq, se);
 			enqueue = 1;
 		}
 		if (cfs_rq->rq_bandwidth.time || cfs_rq->nr_running