sched_ext: idle: Make local functions static in ext_idle.c

Functions that are only used within ext_idle.c can be marked as static
to limit their scope.

No functional changes.

Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Andrea Righi 2025-06-04 16:33:12 +02:00 committed by Tejun Heo
parent c68ea8243c
commit 353656eb84
2 changed files with 17 additions and 14 deletions

View File

@ -75,7 +75,7 @@ static int scx_cpu_node_if_enabled(int cpu)
return cpu_to_node(cpu);
}
bool scx_idle_test_and_clear_cpu(int cpu)
static bool scx_idle_test_and_clear_cpu(int cpu)
{
int node = scx_cpu_node_if_enabled(cpu);
struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
@ -198,7 +198,7 @@ pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u6
/*
* Find an idle CPU in the system, starting from @node.
*/
s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
{
s32 cpu;
@ -794,6 +794,16 @@ static void reset_idle_masks(struct sched_ext_ops *ops)
cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
}
}
#else /* !CONFIG_SMP */
/*
 * !CONFIG_SMP stub: idle-CPU tracking is unavailable on UP builds, so a
 * caller can never successfully test-and-clear an idle CPU.
 *
 * Returns false. Note: this function returns bool; a "-EBUSY" here would
 * be implicitly converted to true (any nonzero scalar converts to true),
 * wrongly reporting the CPU as having been idle and claimed. The previous
 * inline stub in the header correctly returned false.
 */
static bool scx_idle_test_and_clear_cpu(int cpu)
{
	return false;
}
/*
 * !CONFIG_SMP stub: there are no per-node idle cpumasks on UP builds, so
 * no idle CPU can ever be picked; report failure with a negative errno
 * (-EBUSY), matching the error convention of the SMP implementation.
 */
static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
{
return -EBUSY;
}
#endif /* CONFIG_SMP */
void scx_idle_enable(struct sched_ext_ops *ops)
@ -860,7 +870,7 @@ static bool check_builtin_idle_enabled(void)
return false;
}
s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
const struct cpumask *allowed, u64 flags)
{
struct rq *rq;
@ -1121,10 +1131,10 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
if (!check_builtin_idle_enabled())
return false;
if (kf_cpu_valid(cpu, NULL))
return scx_idle_test_and_clear_cpu(cpu);
else
if (!kf_cpu_valid(cpu, NULL))
return false;
return scx_idle_test_and_clear_cpu(cpu);
}
/**

View File

@ -15,16 +15,9 @@ struct sched_ext_ops;
#ifdef CONFIG_SMP
void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops);
void scx_idle_init_masks(void);
bool scx_idle_test_and_clear_cpu(int cpu);
s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags);
#else /* !CONFIG_SMP */
static inline void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops) {}
static inline void scx_idle_init_masks(void) {}
static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
{
return -EBUSY;
}
#endif /* CONFIG_SMP */
s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,