From ee4cebd75ed7b77132c39c0093923f9ff1bcafaa Mon Sep 17 00:00:00 2001
From: Joonwoo Park
Date: Thu, 8 Dec 2016 16:12:12 -0800
Subject: [PATCH] sched: EAS/WALT: use cr_avg instead of prev_runnable_sum

WALT accounts two major statistics; CPU load and cumulative tasks
demand.

The CPU load which is account of accumulated each CPU's absolute
execution time is for CPU frequency guidance.  Whereas cumulative
tasks demand which is each CPU's instantaneous load to reflect CPU's
load at given time is for task placement decision.

Use cumulative tasks demand for cpu_util() for task placement and
introduce cpu_util_freq() for frequency guidance.

Change-Id: Id928f01dbc8cb2a617cdadc584c1f658022565c5
Signed-off-by: Joonwoo Park
---
 kernel/sched/core.c  |  2 +-
 kernel/sched/fair.c  |  4 ++--
 kernel/sched/sched.h | 16 +++++++++++++++-
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9307827cc7b1..4f97de8e0b18 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2992,7 +2992,7 @@ static void sched_freq_tick_pelt(int cpu)
 #ifdef CONFIG_SCHED_WALT
 static void sched_freq_tick_walt(int cpu)
 {
-	unsigned long cpu_utilization = cpu_util(cpu);
+	unsigned long cpu_utilization = cpu_util_freq(cpu);
 	unsigned long capacity_curr = capacity_curr_of(cpu);
 
 	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c193e9b1c38f..3641dad3d4cc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4659,7 +4659,7 @@ static inline void hrtick_update(struct rq *rq)
 static bool cpu_overutilized(int cpu);
 unsigned long boosted_cpu_util(int cpu);
 #else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
 #endif
 
 #ifdef CONFIG_SMP
@@ -5937,7 +5937,7 @@ schedtune_task_margin(struct task_struct *task)
 unsigned long
 boosted_cpu_util(int cpu)
 {
-	unsigned long util = cpu_util(cpu);
+	unsigned long util = cpu_util_freq(cpu);
 	long margin = schedtune_cpu_margin(util, cpu);
 
 	trace_sched_boost_cpu(cpu, util, margin);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 029cf2bbeda2..73077f535e95 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1592,7 +1592,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)
 
 #ifdef CONFIG_SCHED_WALT
 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+		util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_LOAD_SHIFT;
 		do_div(util, walt_ravg_window);
 	}
 #endif
@@ -1608,6 +1608,20 @@ static inline unsigned long cpu_util(int cpu)
 	return __cpu_util(cpu, 0);
 }
 
+static inline unsigned long cpu_util_freq(int cpu)
+{
+	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+	unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+		do_div(util, walt_ravg_window);
+	}
+#endif
+	return (util >= capacity) ? capacity : util;
+}
+
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHED