From 3a9495889492ae5132ea995723bf5ed6b045d8fa Mon Sep 17 00:00:00 2001
From: "J. Avila"
Date: Tue, 14 Jul 2020 22:03:38 +0000
Subject: [PATCH] ANDROID: sched/rt: Add support for rt sync wakeups

Some rt tasks undergo sync wakeups. Currently, these tasks are placed
on other, often sleeping or otherwise idle CPUs, which can lead to
unnecessary power hits. Add support for rt sync wakeups, but enable
it only on SMP targets.

Bug: 157906395
Change-Id: I48864d0847bbe4f7813c842032880ad3f3b8b06b
Signed-off-by: J. Avila
[quic_dickey@quicinc.com: Port to mainline]
Signed-off-by: Stephen Dickey
---
 kernel/sched/core.c  |  3 +++
 kernel/sched/rt.c    | 39 ++++++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h |  2 ++
 3 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe2f1ef73dfe..01115e2742e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3709,6 +3709,9 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 {
 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
 
+	if (wake_flags & WF_SYNC)
+		en_flags |= ENQUEUE_WAKEUP_SYNC;
+
 	lockdep_assert_rq_held(rq);
 
 	if (p->sched_contributes_to_load)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e3cac4b64330..88318f67a28b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1487,6 +1487,27 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 	enqueue_top_rt_rq(&rq->rt);
 }
 
+#ifdef CONFIG_SMP
+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
+					bool sync)
+{
+	/*
+	 * If the waker is CFS, then an RT sync wakeup would preempt the waker
+	 * and force it to run for a likely small time after the RT wakee is
+	 * done. So, only honor RT sync wakeups from RT wakers.
+	 */
+	return sync && task_has_rt_policy(rq->curr) &&
+	       p->prio <= rq->rt.highest_prio.next &&
+	       rq->rt.rt_nr_running <= 2;
+}
+#else
+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
+					bool sync)
+{
+	return false;
+}
+#endif
+
 /*
  * Adding/removing a task to/from a priority array:
  */
@@ -1494,6 +1515,7 @@ static void
 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
+	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
 
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
@@ -1503,7 +1525,8 @@
 
 	enqueue_rt_entity(rt_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
+	    !should_honor_rt_sync(rq, p, sync))
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1559,8 +1582,11 @@
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	struct rq *this_cpu_rq;
 	bool test;
 	int target_cpu = -1;
+	bool sync = !!(flags & WF_SYNC);
+	int this_cpu;
 
 	trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF, flags,
 					    &target_cpu);
@@ -1575,6 +1601,8 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
 
 	rcu_read_lock();
 	curr = READ_ONCE(rq->curr); /* unlocked access */
+	this_cpu = smp_processor_id();
+	this_cpu_rq = cpu_rq(this_cpu);
 
 	/*
 	 * If the current task on @p's runqueue is an RT task, then
@@ -1606,6 +1634,15 @@
 	       unlikely(rt_task(curr)) &&
 	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
 
+	/*
+	 * Respect the sync flag as long as the task can run on this CPU.
+	 */
+	if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
+	    cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
+		cpu = this_cpu;
+		goto out_unlock;
+	}
+
 	if (test || !rt_task_fits_capacity(p, cpu)) {
 		int target = find_lowest_rq(p);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3e303411121f..5c322db39374 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2111,6 +2111,8 @@ extern const u32 sched_prio_to_wmult[40];
 #define ENQUEUE_MIGRATED	0x00
 #endif
 
+#define ENQUEUE_WAKEUP_SYNC	0x80
+
 #define RETRY_TASK		((void *)-1UL)
 
 struct sched_class {
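
A note for readers following along outside the kernel tree: the standalone
sketch below restates the gating that should_honor_rt_sync() implements, so
the conditions can be exercised directly. It is illustrative only; struct
rq_snapshot and its fields are hypothetical stand-ins for the runqueue state
the patch actually reads (task_has_rt_policy(rq->curr),
rq->rt.highest_prio.next and rq->rt.rt_nr_running), not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical snapshot of the waker CPU's runqueue state. */
struct rq_snapshot {
	bool curr_is_rt;	/* task_has_rt_policy(rq->curr) */
	int highest_prio_next;	/* rq->rt.highest_prio.next */
	int rt_nr_running;	/* rq->rt.rt_nr_running */
};

/*
 * Mirrors the predicate added to kernel/sched/rt.c: honor a sync wakeup
 * only when the waker is RT, the wakee's priority is at least as high as
 * the next queued RT task (lower value means higher priority), and the
 * RT runqueue is nearly empty.
 */
static bool should_honor_rt_sync(const struct rq_snapshot *rq,
				 int wakee_prio, bool sync)
{
	return sync && rq->curr_is_rt &&
	       wakee_prio <= rq->highest_prio_next &&
	       rq->rt_nr_running <= 2;
}

int main(void)
{
	struct rq_snapshot rt_waker = { true, 50, 1 };
	struct rq_snapshot cfs_waker = { false, 50, 1 };

	/* RT waker, higher-priority wakee: wakee stays on the waker's CPU. */
	printf("rt waker:  %d\n", should_honor_rt_sync(&rt_waker, 40, true));
	/* CFS waker: fall back to the normal push/placement path. */
	printf("cfs waker: %d\n", should_honor_rt_sync(&cfs_waker, 40, true));
	return 0;
}

The rt_nr_running <= 2 bound appears intended to keep the waker's CPU from
collecting RT work: with the waker running and at most one other RT task
queued, a short-lived sync wakee can run promptly and hand the CPU back.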