
With PREEMPT_RT there is a rt_mutex recursion problem where
sched_submit_work() can use an rtlock (aka spinlock_t). More
specifically what happens is:

  mutex_lock() /* really rt_mutex */
    ...
      __rt_mutex_slowlock_locked()
        task_blocks_on_rt_mutex()
          // enqueue current task as waiter
          // do PI chain walk
        rt_mutex_slowlock_block()
          schedule()
            sched_submit_work()
              ...
              spin_lock() /* really rtlock */
                ...
                  __rt_mutex_slowlock_locked()
                    task_blocks_on_rt_mutex()
                      // enqueue current task as waiter *AGAIN*
                      // *CONFUSION*

Fix this by making rt_mutex do the sched_submit_work() early, before
it enqueues itself as a waiter -- before it even knows *if* it will
wait.

[[ basically Thomas' patch but with different naming and a few asserts
added ]]

Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230908162254.999499-5-bigeasy@linutronix.de
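For context, the three helpers this header declares are consumed by the
rt_mutex slow path roughly as below. This is a simplified sketch, not the
verbatim patch: the ww_mutex context argument and some locking details of
kernel/locking/rtmutex.c are elided.

/*
 * Simplified sketch of how the helpers are used by the rt_mutex slow
 * path (details elided, illustrative only):
 */
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
				     unsigned int state)
{
	unsigned long flags;
	int ret;

	/*
	 * Flush sched_submit_work() *now*, before current is enqueued
	 * as a waiter -- before we even know whether we will block.
	 * Any rtlock taken from within sched_submit_work() can then
	 * block without re-entering the waiter-enqueue path.
	 */
	rt_mutex_pre_schedule();

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * If this has to wait, it calls rt_mutex_schedule(), which is
	 * schedule() minus the sched_submit_work() step done above.
	 */
	ret = __rt_mutex_slowlock_locked(lock, state);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Mirror image of pre_schedule: resume the deferred work. */
	rt_mutex_post_schedule();

	return ret;
}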
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_RT_H
#define _LINUX_SCHED_RT_H

#include <linux/sched.h>

struct task_struct;

static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline bool task_is_realtime(struct task_struct *tsk)
{
	int policy = tsk->policy;

	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return true;
	if (policy == SCHED_DEADLINE)
		return true;
	return false;
}

#ifdef CONFIG_RT_MUTEXES
extern void rt_mutex_pre_schedule(void);
extern void rt_mutex_schedule(void);
extern void rt_mutex_post_schedule(void);

/*
 * Must hold either p->pi_lock or task_rq(p)->lock.
 */
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
{
	return p->pi_top_task;
}
extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	return NULL;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern void normalize_rt_tasks(void);

/*
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define RR_TIMESLICE		(100 * HZ / 1000)

#endif /* _LINUX_SCHED_RT_H */
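Note that rt_task() and task_is_realtime() answer different questions:
rt_task() is priority-based, so it is also true while a SCHED_NORMAL task is
temporarily PI-boosted via rt_mutex_setprio(), whereas task_is_realtime() is
policy-based and stays false for that same boosted task. A small hypothetical
caller (the function name is made up for illustration) showing the
distinction:

/* Hypothetical illustration only: how the two predicates differ. */
static void show_rt_checks(struct task_struct *p)
{
	/*
	 * Priority-based: true whenever p->prio is in the RT range,
	 * including while a SCHED_NORMAL task runs boosted because an
	 * RT task is blocked on an rt_mutex it holds.
	 */
	bool prio_says_rt = rt_task(p);

	/*
	 * Policy-based: only true for SCHED_FIFO, SCHED_RR or
	 * SCHED_DEADLINE, regardless of any PI boost.
	 */
	bool policy_says_rt = task_is_realtime(p);

	pr_info("%s: rt_task=%d task_is_realtime=%d\n",
		p->comm, prio_says_rt, policy_says_rt);
}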