diff --git a/kernel/sched.c b/kernel/sched.c
index c4ab583..857e567 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1960,6 +1960,8 @@ void __disable_runtime(struct rq *rq, struct sched_bandwidth *sched_b,
 
 		if (rt)
 			iter = &(sched_rt_period_rt_rq(sched_b, i)->rq_bandwidth);
+		else
+			iter = &(sched_cfs_period_cfs_rq(sched_b, i)->rq_bandwidth);
 		/*
 		 * Can't reclaim from ourselves or disabled runqueues.
 		 */
@@ -1999,12 +2001,16 @@ balanced:
 }
 
 void disable_runtime_rt(struct rq *rq);
+void disable_runtime_cfs(struct rq *rq);
 static void disable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	disable_runtime_rt(rq);
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
+	disable_runtime_cfs(rq);
+#endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -2021,12 +2027,16 @@ void __enable_runtime(struct sched_bandwidth *sched_b,
 }
 
 void enable_runtime_rt(struct rq *rq);
+void enable_runtime_cfs(struct rq *rq);
 static void enable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	enable_runtime_rt(rq);
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
+	enable_runtime_cfs(rq);
+#endif
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -2050,6 +2060,9 @@ static void do_balance_runtime(struct rq_bandwidth *rq_b,
 
 		if (rt)
 			iter = &(sched_rt_period_rt_rq(sched_b, i)->rq_bandwidth);
+		else
+			iter = &(sched_cfs_period_cfs_rq(sched_b, i)->rq_bandwidth);
+
 		if (iter == rq_b)
 			continue;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 16ed209..dcd093b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -241,6 +241,41 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 	return cfs_rq->rq_bandwidth.throttled;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Ensure this RQ takes back all the runtime it lent to its neighbours.
+ */
+void disable_runtime_cfs(struct rq *rq)
+{
+	struct cfs_rq *cfs_rq;
+
+	if (unlikely(!scheduler_running))
+		return;
+
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct sched_bandwidth *sched_b = sched_cfs_bandwidth(cfs_rq);
+		__disable_runtime(rq, sched_b, &cfs_rq->rq_bandwidth, 0);
+	}
+}
+
+void enable_runtime_cfs(struct rq *rq)
+{
+	struct cfs_rq *cfs_rq;
+
+	if (unlikely(!scheduler_running))
+		return;
+
+	/*
+	 * Reset each runqueue's bandwidth settings
+	 */
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct sched_bandwidth *sched_b = sched_cfs_bandwidth(cfs_rq);
+		__enable_runtime(sched_b, &cfs_rq->rq_bandwidth);
+	}
+}
+
+#endif /* CONFIG_SMP */
+
 /*
  * Check if group entity exceeded its runtime. If so, mark the cfs_rq as
  * throttled mark the current task for reschedling.
@@ -260,6 +295,10 @@ static void sched_cfs_runtime_exceeded(struct sched_entity *se,
 	if (cfs_rq_throttled(cfs_rq))
 		return;
 
+	if (cfs_rq->rq_bandwidth.time > cfs_rq->rq_bandwidth.runtime)
+		balance_runtime(&cfs_rq->rq_bandwidth,
+				sched_cfs_bandwidth(cfs_rq), 0);
+
 	if (cfs_rq->rq_bandwidth.time > cfs_rq->rq_bandwidth.runtime) {
 		cfs_rq->rq_bandwidth.throttled = 1;
 		update_stats_throttle_start(cfs_rq, se);
@@ -313,6 +352,9 @@ static int do_sched_cfs_period_timer(struct sched_bandwidth *cfs_b, int overrun)
 			u64 runtime;
 
 			raw_spin_lock(&cfs_rq->rq_bandwidth.runtime_lock);
+			if (cfs_rq_throttled(cfs_rq))
+				balance_runtime(&cfs_rq->rq_bandwidth,
+						sched_cfs_bandwidth(cfs_rq), 0);
 			runtime = cfs_rq->rq_bandwidth.runtime;
 			cfs_rq->rq_bandwidth.time -= min(cfs_rq->rq_bandwidth.time, overrun*runtime);
 			if (cfs_rq_throttled(cfs_rq) &&
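
Not part of the patch: the sched_fair.c hunks let an over-quota cfs_rq call balance_runtime() before it is throttled, and again from the period timer while it stays throttled, mirroring what the RT class already does. Below is a minimal userspace sketch of that borrowing idea only, under stated assumptions: struct cpu_bw, try_borrow_runtime(), NR_CPUS_DEMO and GLOBAL_RUNTIME are names invented for illustration and do not exist in the kernel, and the real do_balance_runtime() is more careful, taking only a share of each donor's spare runtime and doing every transfer under the donor's runtime_lock within the root domain's span.

/*
 * Standalone sketch (not kernel code) of runtime borrowing between
 * per-CPU bandwidth accounts of one task group.  All identifiers here
 * are hypothetical; locking and fair-share division are omitted.
 */
#include <stdio.h>

#define NR_CPUS_DEMO   4
#define GLOBAL_RUNTIME 100000ULL	/* per-period budget, arbitrary units */

struct cpu_bw {
	unsigned long long runtime;	/* local quota for this period */
	unsigned long long time;	/* runtime consumed so far */
};

/*
 * If @self has consumed more than its local quota, pull spare runtime
 * from the other CPUs: never take more than a donor has left unused,
 * and never let @self grow past the global per-period budget.
 */
static void try_borrow_runtime(struct cpu_bw *bw, int self)
{
	unsigned long long want, spare;
	int i;

	if (bw[self].time <= bw[self].runtime)
		return;			/* still within the local quota */

	for (i = 0; i < NR_CPUS_DEMO; i++) {
		if (i == self)
			continue;	/* cannot reclaim from ourselves */

		spare = bw[i].runtime > bw[i].time ?
			bw[i].runtime - bw[i].time : 0;
		want = bw[self].time - bw[self].runtime;
		if (want < spare)
			spare = want;
		/* do not grow past the per-period global budget */
		if (bw[self].runtime + spare > GLOBAL_RUNTIME)
			spare = GLOBAL_RUNTIME - bw[self].runtime;

		bw[i].runtime -= spare;
		bw[self].runtime += spare;
		if (bw[self].time <= bw[self].runtime)
			break;		/* deficit covered, stop early */
	}
}

int main(void)
{
	struct cpu_bw bw[NR_CPUS_DEMO] = {
		{ .runtime = 25000, .time = 40000 },	/* over quota */
		{ .runtime = 25000, .time =  5000 },
		{ .runtime = 25000, .time = 10000 },
		{ .runtime = 25000, .time = 25000 },	/* nothing to spare */
	};

	try_borrow_runtime(bw, 0);
	printf("cpu0 runtime after borrowing: %llu (consumed %llu)\n",
	       bw[0].runtime, bw[0].time);
	return 0;
}

Compiled and run, the sketch tops cpu0's quota up to its consumption (40000) out of cpu1's unused runtime, which is the effect the added balance_runtime() calls aim for before falling back to throttling.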