ANDROID: sched/events: Introduce util_est trace events

Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
Change-Id: I65e294c454369cbc15a29370d8a13ce358a95c39
Author:    Patrick Bellasi <patrick.bellasi@arm.com>
Date:      2017-10-27 16:12:51 +01:00
Committer: Quentin Perret
Parent:    915679307f
Commit:    8eb64d5f73
2 changed files with 74 additions and 0 deletions

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h

@@ -715,6 +715,70 @@ TRACE_EVENT(sched_load_tg,
		  __entry->load)
);
#endif /* CONFIG_FAIR_GROUP_SCHED */

/*
 * Tracepoint for tasks' estimated utilization.
 */
TRACE_EVENT(sched_util_est_task,

	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),

	TP_ARGS(tsk, avg),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
		__field( pid_t,		pid			)
		__field( int,		cpu			)
		__field( unsigned int,	util_avg		)
		__field( unsigned int,	est_enqueued		)
		__field( unsigned int,	est_ewma		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->cpu		= task_cpu(tsk);
		__entry->util_avg	= avg->util_avg;
		__entry->est_enqueued	= avg->util_est.enqueued;
		__entry->est_ewma	= avg->util_est.ewma;
	),

	TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
		  __entry->comm,
		  __entry->pid,
		  __entry->cpu,
		  __entry->util_avg,
		  __entry->est_ewma,
		  __entry->est_enqueued)
);

/*
 * Tracepoint for root cfs_rq's estimated utilization.
 */
TRACE_EVENT(sched_util_est_cpu,

	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),

	TP_ARGS(cpu, cfs_rq),

	TP_STRUCT__entry(
		__field( int,		cpu			)
		__field( unsigned int,	util_avg		)
		__field( unsigned int,	util_est_enqueued	)
	),

	TP_fast_assign(
		__entry->cpu			= cpu;
		__entry->util_avg		= cfs_rq->avg.util_avg;
		__entry->util_est_enqueued	= cfs_rq->avg.util_est.enqueued;
	),

	TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
		  __entry->cpu,
		  __entry->util_avg,
		  __entry->util_est_enqueued)
);
#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */
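With this header in place, both events register under the "sched" group in tracefs. As a minimal userspace sketch (assuming a kernel carrying this patch and tracefs mounted at /sys/kernel/tracing; the program itself is illustrative and not part of the patch), enabling sched_util_est_task and streaming its records looks like:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Enable the per-task event added above. */
	int fd = open("/sys/kernel/tracing/events/sched/"
		      "sched_util_est_task/enable", O_WRONLY);
	if (fd < 0) {
		perror("enable");
		return 1;
	}
	write(fd, "1", 1);
	close(fd);

	/*
	 * Stream records; each line ends with the TP_printk() format above:
	 * comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u
	 */
	FILE *tp = fopen("/sys/kernel/tracing/trace_pipe", "r");
	char line[512];

	if (!tp) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), tp))
		fputs(line, stdout);
	fclose(tp);
	return 0;
}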

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c

@@ -3665,6 +3665,10 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
	enqueued  = cfs_rq->avg.util_est.enqueued;
	enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);

	/* Update plots for Task and CPU estimated utilization */
	trace_sched_util_est_task(p, &p->se.avg);
	trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
}
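Note the "| UTIL_AVG_UNCHANGED" in the aggregation above: util_est stashes a synchronization flag in bit 0 of the stored estimate. A generic sketch of that LSB packing (the name and the 0x1 value mirror the kernel's, but this standalone redefinition is purely illustrative, not the kernel's exact read path):

#define UTIL_AVG_UNCHANGED 0x1	/* bit 0: util_avg not updated since dequeue */

/* Store the flag alongside the estimate; the estimate's LSB is
 * sacrificed, costing at most one unit of utilization. */
static unsigned int util_est_pack(unsigned int est)
{
	return est | UTIL_AVG_UNCHANGED;
}

/* Consumers that want the plain value mask the flag back off. */
static unsigned int util_est_unpack(unsigned int packed)
{
	return packed & ~UTIL_AVG_UNCHANGED;
}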
/*
@@ -3695,6 +3699,9 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
			  (_task_util_est(p) | UTIL_AVG_UNCHANGED));
	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);

	/* Update plots for CPU's estimated utilization */
	trace_sched_util_est_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);

	/*
	 * Skip update of task's estimated utilization when the task has not
	 * yet completed an activation, e.g. being migrated.
@@ -3740,6 +3747,9 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
	ue.ewma  += last_ewma_diff;
	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
	WRITE_ONCE(p->se.avg.util_est, ue);

	/* Update plots for Task's estimated utilization */
	trace_sched_util_est_task(p, &p->se.avg);
}

static inline int task_fits_capacity(struct task_struct *p, long capacity)
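The shift sequence in this last hunk is the fixed-point EWMA behind util_est.ewma. A self-contained sketch of the arithmetic (UTIL_EST_WEIGHT_SHIFT = 2, i.e. a weight of 1/4, matches mainline, but the constant is restated here only for illustration):

#include <stdio.h>

#define UTIL_EST_WEIGHT_SHIFT 2	/* weight w = 1/2^2 = 1/4 */

/*
 * ewma(t) = w * util(t) + (1 - w) * ewma(t-1)
 *         = ewma(t-1) + w * (util(t) - ewma(t-1))
 * computed in fixed point: scale up, add the raw difference,
 * scale back down, exactly as in the hunk above.
 */
static unsigned int ewma_update(unsigned int ewma, unsigned int util)
{
	int last_ewma_diff = (int)util - (int)ewma;

	ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ewma  += last_ewma_diff;
	ewma >>= UTIL_EST_WEIGHT_SHIFT;
	return ewma;
}

int main(void)
{
	unsigned int ewma = 0;

	/* A task that keeps running at util ~256 pulls the estimate up
	 * by a quarter of the remaining gap per activation: 64, 112, 148, ...
	 * approaching 256 (integer truncation stops it just short). */
	for (int i = 0; i < 16; i++) {
		ewma = ewma_update(ewma, 256);
		printf("activation %2d: ewma=%u\n", i + 1, ewma);
	}
	return 0;
}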