Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-22 23:13:01 +02:00)
locking/lock_events: Add locking events for rtmutex slow paths
Add locking events for rtlock_slowlock() and rt_mutex_slowlock() for
profiling the slow path behavior of rt_spin_lock() and rt_mutex_lock().

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250307232717.1759087-4-boqun.feng@gmail.com
parent f23ecef20a
commit b76b44fb65
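For context on the framework this patch hooks into: the lock events list is an X-macro file, expanded once to generate per-event enum IDs and once to generate counter names, and lockevent_inc()/lockevent_cond_inc() bump counters only when CONFIG_LOCK_EVENT_COUNTS is enabled. The snippet below is a minimal userspace sketch of that X-macro pattern, not the kernel code; the flat counter array, the printing loop, and the two-event list are illustrative stand-ins for the kernel's per-CPU counters and debugfs files.

#include <stdio.h>

/* Tiny stand-in for lock_events_list.h: each entry names one counter. */
#define LOCK_EVENTS						\
	LOCK_EVENT(rtlock_slowlock)	/* rtlock_slowlock() calls */	\
	LOCK_EVENT(rtmutex_slowlock)	/* rt_mutex_slowlock() calls */

/* First expansion: one enum ID per event, plus a terminating count. */
#define LOCK_EVENT(name)	LOCKEVENT_##name,
enum lock_events { LOCK_EVENTS LOCKEVENT_num };
#undef LOCK_EVENT

/* Second expansion: a printable name per event. */
#define LOCK_EVENT(name)	#name,
static const char *lockevent_names[] = { LOCK_EVENTS };
#undef LOCK_EVENT

/* Flat array instead of the kernel's per-CPU counters. */
static unsigned long lockevents[LOCKEVENT_num];

#define lockevent_inc(ev)		lockevents[LOCKEVENT_##ev]++
#define lockevent_cond_inc(ev, cond)	do { if (cond) lockevent_inc(ev); } while (0)

int main(void)
{
	lockevent_inc(rtlock_slowlock);
	lockevent_cond_inc(rtmutex_slowlock, 1);

	for (int i = 0; i < LOCKEVENT_num; i++)
		printf("%s=%lu\n", lockevent_names[i], lockevents[i]);
	return 0;
}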
kernel/locking/lock_events_list.h
@@ -67,3 +67,24 @@ LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs */
 LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired */
 LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions */
 LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs */
+
+/*
+ * Locking events for rtlock_slowlock()
+ */
+LOCK_EVENT(rtlock_slowlock)	/* # of rtlock_slowlock() calls */
+LOCK_EVENT(rtlock_slow_acq1)	/* # of locks acquired after wait_lock */
+LOCK_EVENT(rtlock_slow_acq2)	/* # of locks acquired in for loop */
+LOCK_EVENT(rtlock_slow_sleep)	/* # of sleeps */
+LOCK_EVENT(rtlock_slow_wake)	/* # of wakeup's */
+
+/*
+ * Locking events for rt_mutex_slowlock()
+ */
+LOCK_EVENT(rtmutex_slowlock)	/* # of rt_mutex_slowlock() calls */
+LOCK_EVENT(rtmutex_slow_block)	/* # of rt_mutex_slowlock_block() calls */
+LOCK_EVENT(rtmutex_slow_acq1)	/* # of locks acquired after wait_lock */
+LOCK_EVENT(rtmutex_slow_acq2)	/* # of locks acquired at the end */
+LOCK_EVENT(rtmutex_slow_acq3)	/* # of locks acquired in *block() */
+LOCK_EVENT(rtmutex_slow_sleep)	/* # of sleeps */
+LOCK_EVENT(rtmutex_slow_wake)	/* # of wakeup's */
+LOCK_EVENT(rtmutex_deadlock)	/* # of rt_mutex_handle_deadlock()'s */
kernel/locking/rtmutex.c
@@ -27,6 +27,7 @@
 #include <trace/events/lock.h>
 
 #include "rtmutex_common.h"
+#include "lock_events.h"
 
 #ifndef WW_RT
 # define build_ww_mutex()	(false)
@@ -1612,10 +1613,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 	struct task_struct *owner;
 	int ret = 0;
 
+	lockevent_inc(rtmutex_slow_block);
 	for (;;) {
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock, current, waiter))
+		if (try_to_take_rt_mutex(lock, current, waiter)) {
+			lockevent_inc(rtmutex_slow_acq3);
 			break;
+		}
 
 		if (timeout && !timeout->task) {
 			ret = -ETIMEDOUT;
@@ -1638,8 +1642,10 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = NULL;
 		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) {
+			lockevent_inc(rtmutex_slow_sleep);
 			rt_mutex_schedule();
+		}
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
@@ -1694,6 +1700,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	int ret;
 
 	lockdep_assert_held(&lock->wait_lock);
+	lockevent_inc(rtmutex_slowlock);
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -1701,6 +1708,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 			__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
 			ww_mutex_lock_acquired(ww, ww_ctx);
 		}
+		lockevent_inc(rtmutex_slow_acq1);
 		return 0;
 	}
 
@@ -1719,10 +1727,12 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 			__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
 			ww_mutex_lock_acquired(ww, ww_ctx);
 		}
+		lockevent_inc(rtmutex_slow_acq2);
 	} else {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
+		lockevent_inc(rtmutex_deadlock);
 	}
 
 	/*
@@ -1751,6 +1761,7 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
 				  &waiter, wake_q);
 
 	debug_rt_mutex_free_waiter(&waiter);
+	lockevent_cond_inc(rtmutex_slow_wake, !wake_q_empty(wake_q));
 	return ret;
 }
 
@@ -1823,9 +1834,12 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 	struct task_struct *owner;
 
 	lockdep_assert_held(&lock->wait_lock);
+	lockevent_inc(rtlock_slowlock);
 
-	if (try_to_take_rt_mutex(lock, current, NULL))
+	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		lockevent_inc(rtlock_slow_acq1);
 		return;
+	}
 
 	rt_mutex_init_rtlock_waiter(&waiter);
 
@@ -1838,8 +1852,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 
 	for (;;) {
 		/* Try to acquire the lock again */
-		if (try_to_take_rt_mutex(lock, current, &waiter))
+		if (try_to_take_rt_mutex(lock, current, &waiter)) {
+			lockevent_inc(rtlock_slow_acq2);
 			break;
+		}
 
 		if (&waiter == rt_mutex_top_waiter(lock))
 			owner = rt_mutex_owner(lock);
@@ -1847,8 +1863,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 			owner = NULL;
 		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) {
+			lockevent_inc(rtlock_slow_sleep);
 			schedule_rtlock();
+		}
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(TASK_RTLOCK_WAIT);
@@ -1865,6 +1883,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
+	lockevent_cond_inc(rtlock_slow_wake, !wake_q_empty(wake_q));
 }
 
 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
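With CONFIG_LOCK_EVENT_COUNTS=y the new counters should become readable as individual debugfs files. The directory name below (/sys/kernel/debug/lock_event_counts/) follows the existing lock event counters and is an assumption here, as is the tiny reader program itself: it is a usage sketch for inspecting the counters added by this patch, not part of the patch.

#include <stdio.h>

int main(void)
{
	/* A few of the counters this patch introduces (needs root for debugfs). */
	static const char *events[] = {
		"rtlock_slowlock", "rtlock_slow_sleep",
		"rtmutex_slowlock", "rtmutex_slow_sleep",
	};
	char path[256], buf[64];

	for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/lock_event_counts/%s", events[i]);
		FILE *f = fopen(path, "r");
		if (!f) {
			perror(path);	/* counter missing or debugfs not mounted */
			continue;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%-20s %s", events[i], buf);
		fclose(f);
	}
	return 0;
}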