ANDROID: vendor_hooks: Add hooks for util-update related functions

Add vendor hooks so that vendors can implement their own load tracker.

Through these vendor hooks, vendors can run their own load tracking to
improve performance and reduce power consumption. For example, if the
sensitivity of PELT could be controlled per cgroup, the scheduler could
be tuned for various scenarios.

Bug: 343593793
Change-Id: Icb6b0c408b0c243042a8d481e7bd0249d0d29980
Signed-off-by: Jinho Jeong <jh9317.jeong@samsung.com>
Signed-off-by: lakkyung jung <lakkyung.jung@samsung.com>
Authored by Jinho Jeong on 2024-05-29 14:07:06 +09:00
parent e484bff0c4
commit 7ee86e5b15
3 changed files with 35 additions and 0 deletions
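
To make the intent concrete, here is a minimal sketch of how a vendor module might consume one of these hooks. Everything named my_vendor_* is invented for illustration and is not part of this patch; the only pieces taken from the patch itself are the android_rvh_update_load_avg restricted hook and its TP_PROTO arguments (the tracepoint infrastructure prepends a void *data parameter to the probe).

/*
 * Illustrative vendor module (not part of this patch). It registers the
 * new android_rvh_update_load_avg restricted hook and keeps a trivial
 * statistic; a real load tracker would maintain its own per-entity or
 * per-cgroup signal here instead.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <trace/hooks/sched.h>

static atomic64_t my_vendor_updates = ATOMIC64_INIT(0);

/* Probe prototype: void *data first, then the TP_PROTO arguments. */
static void my_vendor_update_load_avg(void *data, u64 now,
                                      struct cfs_rq *cfs_rq,
                                      struct sched_entity *se)
{
        /* Placeholder for a vendor-specific load/util update. */
        atomic64_inc(&my_vendor_updates);
}

static int __init my_vendor_loadtrack_init(void)
{
        /* Restricted hooks accept a single probe and cannot be unregistered. */
        return register_trace_android_rvh_update_load_avg(
                        my_vendor_update_load_avg, NULL);
}
module_init(my_vendor_loadtrack_init);
MODULE_LICENSE("GPL");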

include/trace/hooks/sched.h

@@ -420,6 +420,26 @@ DECLARE_HOOK(android_vh_mmput,
TP_PROTO(void *unused),
TP_ARGS(unused));
DECLARE_RESTRICTED_HOOK(android_rvh_attach_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_detach_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg,
TP_PROTO(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(now, cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_remove_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_blocked_fair,
TP_PROTO(struct rq *rq),
TP_ARGS(rq), 1);
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_SCHED_H */

kernel/sched/fair.c

@@ -4582,6 +4582,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
else
se->avg.load_sum = 1;
trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
enqueue_load_avg(cfs_rq, se);
cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum;
@@ -4605,6 +4607,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
trace_android_rvh_detach_entity_load_avg(cfs_rq, se);
dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@@ -4649,6 +4653,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
decayed = update_cfs_rq_load_avg(now, cfs_rq);
decayed |= propagate_entity_load_avg(se);
trace_android_rvh_update_load_avg(now, cfs_rq, se);
if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
/*
@@ -4706,6 +4712,8 @@ static void remove_entity_load_avg(struct sched_entity *se)
sync_entity_load_avg(se);
trace_android_rvh_remove_entity_load_avg(cfs_rq, se);
raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
++cfs_rq->removed.nr;
cfs_rq->removed.util_avg += se->avg.util_avg;
@@ -9286,6 +9294,8 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
bool decayed = false;
int cpu = cpu_of(rq);
trace_android_rvh_update_blocked_fair(rq);
/*
* Iterates the task_group tree in a bottom up fashion, see
* list_add_leaf_cfs_rq() for details.

drivers/android/vendor_hooks.c

@@ -108,3 +108,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_cgroup_css_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_reweight_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_context_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);
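
Exporting the tracepoints with EXPORT_TRACEPOINT_SYMBOL_GPL is what lets a loadable GPL vendor module register probes at all; without it the register_trace_android_rvh_* calls would not link. As a second illustrative sketch under the same assumptions (all my_vendor_* names are hypothetical and not part of this patch), a module could pair the attach and detach hooks to keep its own count of entities currently contributing load:

/* Hypothetical pairing of the attach/detach hooks (not part of this patch). */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <trace/hooks/sched.h>

static atomic_t my_vendor_attached = ATOMIC_INIT(0);

static void my_vendor_attach(void *data, struct cfs_rq *cfs_rq,
                             struct sched_entity *se)
{
        atomic_inc(&my_vendor_attached);        /* entity now contributes load */
}

static void my_vendor_detach(void *data, struct cfs_rq *cfs_rq,
                             struct sched_entity *se)
{
        atomic_dec(&my_vendor_attached);        /* entity no longer contributes */
}

static int __init my_vendor_attach_init(void)
{
        int ret;

        ret = register_trace_android_rvh_attach_entity_load_avg(
                        my_vendor_attach, NULL);
        if (ret)
                return ret;
        /* Restricted hooks cannot be unregistered, so a later failure is final. */
        return register_trace_android_rvh_detach_entity_load_avg(
                        my_vendor_detach, NULL);
}
module_init(my_vendor_attach_init);
MODULE_LICENSE("GPL");  /* required: the tracepoints are exported _GPL */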