mirror of
https://github.com/hardkernel/linux.git
synced 2026-03-29 05:50:28 +09:00
Revert "Revert "Revert "ANDROID: Sched: Add restricted vendor hooks for scheduler"""
This reverts commit ca60d78542. It causes
merge issues with 5.18-rc1 and has to be reverted for now. If it is
still needed, it can be added back after 5.18-rc1.
Bug: 200103201
Cc: Ashay Jaiswal <quic_ashayj@quicinc.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I08e5d2f87ab47024344e229b28d08baaa8ffae09
This commit is contained in:
@@ -100,22 +100,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_disable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_enable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_disable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_enable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_task_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up_success);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_wake_up_new_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_new_task_stats);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_flush_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tick_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_starting);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_dying);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_account_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_build_perf_domains);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_can_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
@@ -135,71 +135,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_cpus_allowed,
	const struct cpumask *new_mask, int *ret),
	TP_ARGS(p, cpus_requested, new_mask, ret), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_set_task_cpu,
	TP_PROTO(struct task_struct *p, unsigned int new_cpu),
	TP_ARGS(p, new_cpu), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up_success,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_wake_up_new_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_new_task_stats,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_flush_task,
	TP_PROTO(struct task_struct *prev),
	TP_ARGS(prev), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_tick_entry,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_schedule,
	TP_PROTO(struct task_struct *prev, struct task_struct *next, struct rq *rq),
	TP_ARGS(prev, next, rq), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_starting,
	TP_PROTO(int cpu),
	TP_ARGS(cpu), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_dying,
	TP_PROTO(int cpu),
	TP_ARGS(cpu), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_account_irq,
	TP_PROTO(struct task_struct *curr, int cpu, s64 delta),
	TP_ARGS(curr, cpu, delta), 1);

struct sched_entity;
DECLARE_RESTRICTED_HOOK(android_rvh_place_entity,
	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 *vruntime),
	TP_ARGS(cfs_rq, se, initial, vruntime), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_build_perf_domains,
	TP_PROTO(bool *eas_check),
	TP_ARGS(eas_check), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_update_cpu_capacity,
	TP_PROTO(int cpu, unsigned long *capacity),
	TP_ARGS(cpu, capacity), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
	TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
	TP_ARGS(p, rq, need_update), 1);

struct cgroup_taskset;
DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_attach,
	TP_PROTO(struct cgroup_taskset *tset),
@@ -3117,7 +3117,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
		p->se.nr_migrations++;
		rseq_migrate(p);
		perf_event_task_migrate(p);
		trace_android_rvh_set_task_cpu(p, new_cpu);
	}

	__set_task_cpu(p, new_cpu);

@@ -4173,8 -4172,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	 */
	smp_cond_load_acquire(&p->on_cpu, !VAL);

	trace_android_rvh_try_to_wake_up(p);

	cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
	if (task_cpu(p) != cpu) {
		if (p->in_iowait) {

@@ -4194,10 +4191,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out:
	if (success) {
		trace_android_rvh_try_to_wake_up_success(p);
	if (success)
		ttwu_stat(p, task_cpu(p), wake_flags);
	}
	preempt_enable();

	return success;

@@ -4443,8 +4438,6 @@ int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
 */
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	trace_android_rvh_sched_fork(p);

	__sched_fork(clone_flags, p);
	/*
	 * We mark the process as NEW here. This guarantees that

@@ -4570,8 +4563,6 @@ void wake_up_new_task(struct task_struct *p)
	struct rq_flags rf;
	struct rq *rq;

	trace_android_rvh_wake_up_new_task(p);

	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
	WRITE_ONCE(p->__state, TASK_RUNNING);
#ifdef CONFIG_SMP

@@ -4590,7 +4581,6 @@ void wake_up_new_task(struct task_struct *p)
	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
	post_init_entity_util_avg(p);
	trace_android_rvh_new_task_stats(p);

	activate_task(rq, p, ENQUEUE_NOCLOCK);
	trace_sched_wakeup_new(p);

@@ -4975,8 +4965,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
		if (prev->sched_class->task_dead)
			prev->sched_class->task_dead(prev);

		trace_android_rvh_flush_task(prev);

		/* Task is done with its stack. */
		put_task_stack(prev);

@@ -5340,7 +5328,6 @@ void scheduler_tick(void)
	rq_lock(rq, &rf);

	update_rq_clock(rq);
	trace_android_rvh_tick_entry(rq);

	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);

@@ -6357,7 +6344,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
	rq->last_seen_need_resched_ns = 0;
#endif

	trace_android_rvh_schedule(prev, next, rq);
	if (likely(prev != next)) {
		rq->nr_switches++;
		/*

@@ -9235,7 +9221,6 @@ int sched_cpu_starting(unsigned int cpu)
	sched_core_cpu_starting(cpu);
	sched_rq_cpu_starting(cpu);
	sched_tick_start(cpu);
	trace_android_rvh_sched_cpu_starting(cpu);
	return 0;
}

@@ -9309,8 +9294,6 @@ int sched_cpu_dying(unsigned int cpu)
	}
	rq_unlock_irqrestore(rq, &rf);

	trace_android_rvh_sched_cpu_dying(cpu);

	calc_load_migrate(rq);
	update_max_interval();
	hrtick_clear(rq);

@@ -4,7 +4,6 @@
 */
#include <linux/cpufreq_times.h>
#include "sched.h"
#include <trace/hooks/sched.h>

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

@@ -75,8 +74,6 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);

	trace_android_rvh_account_irq(curr, cpu, delta);
}

static u64 irqtime_tick_accounted(u64 maxtime)

@@ -4123,10 +4123,7 @@ static inline int task_fits_capacity(struct task_struct *p,

static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
	bool need_update = true;

	trace_android_rvh_update_misfit_status(p, rq, &need_update);
	if (!static_branch_unlikely(&sched_asym_cpucapacity) || !need_update)
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return;

	if (!p || p->nr_cpus_allowed == 1) {

@@ -4235,7 +4232,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)

	/* ensure we never gain time by being placed backwards. */
	se->vruntime = max_vruntime(se->vruntime, vruntime);
	trace_android_rvh_place_entity(cfs_rq, se, initial, &vruntime);
}

static void check_enqueue_throttle(struct cfs_rq *cfs_rq);

@@ -8455,7 +8451,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
	if (!capacity)
		capacity = 1;

	trace_android_rvh_update_cpu_capacity(cpu, &capacity);
	cpu_rq(cpu)->cpu_capacity = capacity;
	trace_sched_cpu_capacity_tp(cpu_rq(cpu));

@@ -364,7 +364,6 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
	 * EAS is enabled for asymmetric CPU capacity topologies.
	 * Allow vendor to override if desired.
	 */
	trace_android_rvh_build_perf_domains(&eas_check);
	if (!per_cpu(sd_asym_cpucapacity, cpu) && !eas_check) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
Reference in New Issue
Block a user