ANDROID: sched: Export symbols needed for vendor hooks
Bug: 344826816
Bug: 297343949
Change-Id: I0cb65e85b36687bfaae6a185ca373d7fb8de0a77
Signed-off-by: Rick Yiu <rickyiu@google.com>
(cherry picked from commit eb9686932b)
Signed-off-by: Qais Yousef <qyousef@google.com>
commit ed558fd9d8 (parent 38fdc88203)
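For context (the commit message itself is terse): the diff below exports push_cpu_stop(), reweight_task(), ___update_load_sum() and ___update_load_avg() so that Android vendor-hook handlers, which live in GPL vendor modules but run inside the scheduler, can call them; the latter two also lose static __always_inline so an out-of-line definition exists to export. A minimal sketch of a module that can now link against one of these symbols (the module name, the extern declaration and the call site are illustrative assumptions, not part of this commit):

    #include <linux/module.h>
    #include <linux/sched.h>

    /* reweight_task() is normally declared in kernel/sched/sched.h; the
     * extern is repeated here only to keep the sketch self-contained.
     * The signature matches the reweight_task() hunk below. */
    extern void reweight_task(struct task_struct *p, int prio);

    static int __init vendor_sched_demo_init(void)
    {
            /* Illustration only: index 20 is nice 0 in sched_prio_to_weight[].
             * A real vendor hook would do this under the proper task/rq locks. */
            reweight_task(current, 20);
            return 0;
    }
    module_init(vendor_sched_demo_init);

    static void __exit vendor_sched_demo_exit(void) { }
    module_exit(vendor_sched_demo_exit);

    /* GPL license is mandatory: these are EXPORT_SYMBOL_GPL() exports. */
    MODULE_LICENSE("GPL");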
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2798,6 +2798,7 @@ out_unlock:
 	put_task_struct(p);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(push_cpu_stop);
 
 /*
  * sched_class::set_cpus_allowed must do the below, but is not required to
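push_cpu_stop() is a cpu_stop_fn_t callback, so the export is useful for queuing it rather than calling it directly. A hedged sketch of the queuing pattern core.c itself uses (the helper name is hypothetical; rq->push_work and cpu_of() are scheduler-internal):

    #include <linux/stop_machine.h>

    /* Queue push_cpu_stop() on p's CPU; it runs on the stopper thread and
     * releases the reference taken here via put_task_struct(), as the
     * out_unlock path in the hunk above shows. */
    static void example_queue_push(struct rq *rq, struct task_struct *p)
    {
            get_task_struct(p);
            stop_one_cpu_nowait(cpu_of(rq), push_cpu_stop, p, &rq->push_work);
    }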
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3807,6 +3807,7 @@ void reweight_task(struct task_struct *p, int prio)
 	reweight_entity(cfs_rq, se, weight);
 	load->inv_weight = sched_prio_to_wmult[prio];
 }
+EXPORT_SYMBOL_GPL(reweight_task);
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 
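The inv_weight assigned in this hunk is the precomputed reciprocal that lets the scheduler replace a division by the task weight with a multiply and shift. The values below come from the sched_prio_to_weight[]/sched_prio_to_wmult[] tables in kernel/sched/core.c:

    /* inv_weight = 2^32 / weight; e.g. for nice 0 (index 20):
     *   sched_prio_to_weight[20] = 1024
     *   sched_prio_to_wmult[20]  = 4194304   == (1ULL << 32) / 1024
     * so __calc_delta() can compute (delta * inv_weight) >> 32. */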
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -178,7 +178,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
  * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *          = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int
+int
 ___update_load_sum(u64 now, struct sched_avg *sa,
 		  unsigned long load, unsigned long runnable, int running)
 {
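The geometric series in the comment above can be checked numerically. A standalone sketch (userspace C, purely illustrative, not kernel code) using the PELT decay factor y with y^32 = 1/2:

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
            double y = pow(0.5, 1.0 / 32.0); /* y^32 == 0.5: load halves every 32 periods */
            double sum = 0.0;

            /* A task busy for 64 consecutive ~1ms periods (u_i = 1 each): */
            for (int i = 0; i < 64; i++)
                    sum = sum * y + 1.0;     /* the accumulation the comment describes */

            /* Series ceiling is 1/(1-y) ~= 46.67 periods, i.e. ~47742 in the
             * kernel's 1us fixed-point units (LOAD_AVG_MAX). */
            printf("sum = %.2f, max = %.2f\n", sum, 1.0 / (1.0 - y));
            return 0;
    }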
@@ -232,6 +232,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(___update_load_sum);
 
 /*
  * When syncing *_avg with *_sum, we must take into account the current
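This export pairs with the ___update_load_avg() export two hunks below: ___update_load_sum() returns 1 only when enough time has passed for the sums to change, and only then is recomputing the averages worthwhile. Roughly what the update_*_avg() wrappers in kernel/sched/pelt.c do (a simplified rendering; see the wrappers for the exact flags):

    if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
                           cfs_rq->curr == se))
            ___update_load_avg(&se->avg, se_weight(se));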
@@ -257,7 +258,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
  * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
  * if it's more convenient.
  */
-static __always_inline void
+void
 ___update_load_avg(struct sched_avg *sa, unsigned long load)
 {
 	u32 divider = get_pelt_divider(sa);
@@ -269,6 +270,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
 	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
 }
+EXPORT_SYMBOL_GPL(___update_load_avg);
 
 /*
  * sched_entity:
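For reference, the divider used in both ___update_load_avg() hunks comes from get_pelt_divider() in kernel/sched/pelt.h (as of this kernel version):

    /* divider = LOAD_AVG_MAX - 1024 + sa->period_contrib
     * LOAD_AVG_MAX (47742) is the saturation value of the PELT series in
     * 1us units; swapping the always-incomplete current period (1024us)
     * for the part that actually elapsed (period_contrib) keeps
     * *_avg == *_sum / divider exact in the middle of a period. */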