mirror of
https://github.com/hardkernel/linux.git
synced 2026-04-01 18:53:02 +09:00
ANDROID: sched: Align EAS with upstream
The core EAS patches have now been accepted upstream. The patches used in Android are based on a slightly earlier version of the series. In order to reduce the delta with mainline and ease backports, align the EAS code paths with their upstream version. This basically applies the output of git range-diff on the appropriate commits, and fixes a conflict in schedutil regarding the integration of schedtune.

Bug: 120440300
Change-Id: I208ebeb4207e3f4f4bbb5103c606b293a464c20f
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
This commit is contained in:
@@ -25,7 +25,6 @@
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/sched/cpufreq.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
|
||||
@@ -950,6 +950,14 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
|
||||
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
|
||||
struct cpufreq_governor *old_gov);
|
||||
#else
|
||||
static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
|
||||
struct cpufreq_governor *old_gov) { }
|
||||
#endif
|
||||
|
||||
extern void arch_freq_prepare_all(void);
|
||||
extern unsigned int arch_freq_get_on_cpu(int cpu);
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
#ifndef _LINUX_SCHED_CPUFREQ_H
|
||||
#define _LINUX_SCHED_CPUFREQ_H
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
@@ -29,12 +28,4 @@ static inline unsigned long map_util_freq(unsigned long util,
|
||||
}
|
||||
#endif /* CONFIG_CPU_FREQ */
|
||||
|
||||
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
|
||||
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
|
||||
struct cpufreq_governor *old_gov);
|
||||
#else
|
||||
static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
|
||||
struct cpufreq_governor *old_gov) { }
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_SCHED_CPUFREQ_H */
|
||||
|
||||
@@ -233,8 +233,8 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
|
||||
unsigned long schedutil_freq_util(int cpu, unsigned long util,
|
||||
unsigned long max, enum schedutil_type type)
|
||||
{
|
||||
unsigned long dl_util, irq;
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
unsigned long irq;
|
||||
|
||||
if (sched_feat(SUGOV_RT_MAX_FREQ) && type == FREQUENCY_UTIL &&
|
||||
rt_rq_is_runnable(&rq->rt))
|
||||
@@ -255,29 +255,26 @@ unsigned long schedutil_freq_util(int cpu, unsigned long util,
|
||||
* to be dealt with. The exact way of doing things depend on the calling
|
||||
* context.
|
||||
*/
|
||||
if (type == FREQUENCY_UTIL) {
|
||||
/*
|
||||
* For frequency selection we do not make cpu_util_dl() a
|
||||
* permanent part of this sum because we want to use
|
||||
* cpu_bw_dl() later on, but we need to check if the
|
||||
* CFS+RT+DL sum is saturated (ie. no idle time) such
|
||||
* that we select f_max when there is no idle time.
|
||||
*
|
||||
* NOTE: numerical errors or stop class might cause us
|
||||
* to not quite hit saturation when we should --
|
||||
* something for later.
|
||||
*/
|
||||
if ((util + cpu_util_dl(rq)) >= max)
|
||||
return max;
|
||||
} else {
|
||||
/*
|
||||
* OTOH, for energy computation we need the estimated
|
||||
* running time, so include util_dl and ignore dl_bw.
|
||||
*/
|
||||
util += cpu_util_dl(rq);
|
||||
if (util >= max)
|
||||
return max;
|
||||
}
|
||||
dl_util = cpu_util_dl(rq);
|
||||
|
||||
/*
|
||||
* For frequency selection we do not make cpu_util_dl() a permanent part
|
||||
* of this sum because we want to use cpu_bw_dl() later on, but we need
|
||||
* to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
|
||||
* that we select f_max when there is no idle time.
|
||||
*
|
||||
* NOTE: numerical errors or stop class might cause us to not quite hit
|
||||
* saturation when we should -- something for later.
|
||||
*/
|
||||
if (util + dl_util >= max)
|
||||
return max;
|
||||
|
||||
/*
|
||||
* OTOH, for energy computation we need the estimated running time, so
|
||||
* include util_dl and ignore dl_bw.
|
||||
*/
|
||||
if (type == ENERGY_UTIL)
|
||||
util += dl_util;
|
||||
|
||||
/*
|
||||
* There is still idle time; further improve the number by using the
|
||||
@@ -291,21 +288,18 @@ unsigned long schedutil_freq_util(int cpu, unsigned long util,
|
||||
util = scale_irq_capacity(util, irq, max);
|
||||
util += irq;
|
||||
|
||||
if (type == FREQUENCY_UTIL) {
|
||||
/*
|
||||
* Bandwidth required by DEADLINE must always be granted
|
||||
* while, for FAIR and RT, we use blocked utilization of
|
||||
* IDLE CPUs as a mechanism to gracefully reduce the
|
||||
* frequency when no tasks show up for longer periods of
|
||||
* time.
|
||||
*
|
||||
* Ideally we would like to set bw_dl as min/guaranteed
|
||||
* freq and util + bw_dl as requested freq. However,
|
||||
* cpufreq is not yet ready for such an interface. So,
|
||||
* we only do the latter for now.
|
||||
*/
|
||||
/*
|
||||
* Bandwidth required by DEADLINE must always be granted while, for
|
||||
* FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
|
||||
* to gracefully reduce the frequency when no tasks show up for longer
|
||||
* periods of time.
|
||||
*
|
||||
* Ideally we would like to set bw_dl as min/guaranteed freq and util +
|
||||
* bw_dl as requested freq. However, cpufreq is not yet ready for such
|
||||
* an interface. So, we only do the latter for now.
|
||||
*/
|
||||
if (type == FREQUENCY_UTIL)
|
||||
util += cpu_bw_dl(rq);
|
||||
}
|
||||
|
||||
return min(max, util);
|
||||
}
|
||||
|
||||
@@ -279,7 +279,7 @@ static void perf_domain_debug(const struct cpumask *cpu_map,
|
||||
if (!sched_debug() || !pd)
|
||||
return;
|
||||
|
||||
printk(KERN_DEBUG "root_domain %*pbl: ", cpumask_pr_args(cpu_map));
|
||||
printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
|
||||
|
||||
while (pd) {
|
||||
printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
|
||||
@@ -300,29 +300,13 @@ static void destroy_perf_domain_rcu(struct rcu_head *rp)
|
||||
free_pd(pd);
|
||||
}
|
||||
|
||||
static void sched_energy_start(int ndoms_new, cpumask_var_t doms_new[])
|
||||
static void sched_energy_set(bool has_eas)
|
||||
{
|
||||
/*
|
||||
* The conditions for EAS to start are checked during the creation of
|
||||
* root domains. If one of them meets all conditions, it will have a
|
||||
* non-null list of performance domains.
|
||||
*/
|
||||
while (ndoms_new) {
|
||||
if (cpu_rq(cpumask_first(doms_new[ndoms_new - 1]))->rd->pd)
|
||||
goto enable;
|
||||
ndoms_new--;
|
||||
}
|
||||
|
||||
if (static_branch_unlikely(&sched_energy_present)) {
|
||||
if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
|
||||
if (sched_debug())
|
||||
pr_info("%s: stopping EAS\n", __func__);
|
||||
static_branch_disable_cpuslocked(&sched_energy_present);
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
enable:
|
||||
if (!static_branch_unlikely(&sched_energy_present)) {
|
||||
} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
|
||||
if (sched_debug())
|
||||
pr_info("%s: starting EAS\n", __func__);
|
||||
static_branch_enable_cpuslocked(&sched_energy_present);
|
||||
@@ -355,7 +339,7 @@ enable:
|
||||
#define EM_MAX_COMPLEXITY 2048
|
||||
|
||||
extern struct cpufreq_governor schedutil_gov;
|
||||
static void build_perf_domains(const struct cpumask *cpu_map)
|
||||
static bool build_perf_domains(const struct cpumask *cpu_map)
|
||||
{
|
||||
int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
|
||||
struct perf_domain *pd = NULL, *tmp;
|
||||
@@ -424,7 +408,7 @@ static void build_perf_domains(const struct cpumask *cpu_map)
|
||||
if (tmp)
|
||||
call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
|
||||
|
||||
return;
|
||||
return !!pd;
|
||||
|
||||
free:
|
||||
free_pd(pd);
|
||||
@@ -432,6 +416,8 @@ free:
|
||||
rcu_assign_pointer(rd->pd, NULL);
|
||||
if (tmp)
|
||||
call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
|
||||
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
static void free_pd(struct perf_domain *pd) { }
|
||||
@@ -2166,6 +2152,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
|
||||
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
|
||||
struct sched_domain_attr *dattr_new)
|
||||
{
|
||||
bool __maybe_unused has_eas = false;
|
||||
int i, j, n;
|
||||
int new_topology;
|
||||
|
||||
@@ -2229,15 +2216,17 @@ match2:
|
||||
for (i = 0; i < ndoms_new; i++) {
|
||||
for (j = 0; j < n && !sched_energy_update; j++) {
|
||||
if (cpumask_equal(doms_new[i], doms_cur[j]) &&
|
||||
cpu_rq(cpumask_first(doms_cur[j]))->rd->pd)
|
||||
cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
|
||||
has_eas = true;
|
||||
goto match3;
|
||||
}
|
||||
}
|
||||
/* No match - add perf. domains for a new rd */
|
||||
build_perf_domains(doms_new[i]);
|
||||
has_eas |= build_perf_domains(doms_new[i]);
|
||||
match3:
|
||||
;
|
||||
}
|
||||
sched_energy_start(ndoms_new, doms_new);
|
||||
sched_energy_set(has_eas);
|
||||
#endif
|
||||
|
||||
/* Remember the new sched domains: */
|
||||
|
||||
Reference in New Issue
Block a user