linux-yocto/include/linux/mutex.h
Maxim Levitsky fb49f07ba1 locking/mutex: implement mutex_lock_killable_nest_lock
KVM's SEV intra-host migration code needs to lock all vCPUs
of the source and the target VM before it proceeds with the migration.

The number of vCPUs that belong to each VM is not bounded by anything
except a self-imposed KVM limit of CONFIG_KVM_MAX_NR_VCPUS vCPUs, which is
significantly larger than the depth of lockdep's lock stack.

Luckily, in both cases (the source and the target VM) the vCPU locks
are taken under that VM's 'kvm->lock', which means we can use the
little-known lockdep feature called a "nest_lock" to support this use
case more cleanly than it is currently done.

Implement and expose 'mutex_lock_killable_nest_lock' for this
purpose.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Message-ID: <20250512180407.659015-3-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2025-05-27 12:16:41 -04:00
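
For illustration, the new API lets a holder of kvm->lock take every vCPU mutex (all of which share one lock class) without overflowing lockdep's held-lock stack. A minimal sketch based on the description above (declarations, the unlock path, and error unwinding are elided; kvm_for_each_vcpu is KVM's existing vCPU iterator):

	/* caller holds kvm->lock */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock))
			goto unwind;	/* killed: release the vCPU mutexes taken so far */
	}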

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H
#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
#include <linux/cleanup.h>
#include <linux/mutex_types.h>
struct device;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
		, .dep_map = {					\
			.name = #lockname,			\
			.wait_type_inner = LD_WAIT_SLEEP,	\
		}
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#ifdef CONFIG_DEBUG_MUTEXES
# define __DEBUG_MUTEX_INITIALIZER(lockname)			\
	, .magic = &lockname
extern void mutex_destroy(struct mutex *lock);
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
static inline void mutex_destroy(struct mutex *lock) {}
#endif
/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to the unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
#define mutex_init(mutex)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__mutex_init((mutex), #mutex, &__key);			\
} while (0)
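/*
 * Example: a mutex embedded in a dynamically allocated object
 * (illustrative sketch; 'my_data' is a hypothetical structure):
 *
 *	struct my_data {
 *		struct mutex lock;
 *	};
 *
 *	static void my_data_init(struct my_data *d)
 *	{
 *		mutex_init(&d->lock);
 *	}
 */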
/**
 * mutex_init_with_key - initialize a mutex with a given lockdep key
 * @mutex: the mutex to be initialized
 * @key: the lockdep key to be associated with the mutex
 *
 * Initialize the mutex to the unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
#define mutex_init_with_key(mutex, key) __mutex_init((mutex), #mutex, (key))
#ifndef CONFIG_PREEMPT_RT
#define __MUTEX_INITIALIZER(lockname) \
		{ .owner = ATOMIC_LONG_INIT(0) \
		, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) \
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
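/*
 * Example: a file-scope mutex, defined and initialized in one step
 * (illustrative sketch; 'my_registry_lock' is hypothetical):
 *
 *	static DEFINE_MUTEX(my_registry_lock);
 *
 *	mutex_lock(&my_registry_lock);
 *	... update shared state ...
 *	mutex_unlock(&my_registry_lock);
 */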
extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);

/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
extern bool mutex_is_locked(struct mutex *lock);
#else /* !CONFIG_PREEMPT_RT */
/*
 * Preempt-RT variant based on rtmutexes.
 */
#define __MUTEX_INITIALIZER(mutexname)					\
{									\
	.rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex)	\
	__DEP_MAP_MUTEX_INITIALIZER(mutexname)				\
}

#define DEFINE_MUTEX(mutexname)						\
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void __mutex_rt_init(struct mutex *lock, const char *name,
			    struct lock_class_key *key);

#define mutex_is_locked(l)	rt_mutex_base_is_locked(&(l)->rtmutex)

#define __mutex_init(mutex, name, key)			\
do {							\
	rt_mutex_base_init(&(mutex)->rtmutex);		\
	__mutex_rt_init((mutex), name, key);		\
} while (0)
#endif /* CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_MUTEXES
int __devm_mutex_init(struct device *dev, struct mutex *lock);
#else
static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
{
	/*
	 * When CONFIG_DEBUG_MUTEXES is off, mutex_destroy() is just a no-op,
	 * so there is no real need to register it in the devm subsystem.
	 */
	return 0;
}
#endif
#define devm_mutex_init(dev, mutex)			\
({							\
	typeof(mutex) mutex_ = (mutex);			\
							\
	mutex_init(mutex_);				\
	__devm_mutex_init(dev, mutex_);			\
})
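/*
 * Example: device-managed initialization in a probe path (illustrative
 * sketch; 'my_priv' and 'my_probe' are hypothetical). With
 * CONFIG_DEBUG_MUTEXES enabled, mutex_destroy() runs automatically on
 * driver detach:
 *
 *	struct my_priv {
 *		struct mutex lock;
 *	};
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_priv *p;
 *
 *		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
 *		if (!p)
 *			return -ENOMEM;
 *
 *		return devm_mutex_init(&pdev->dev, &p->lock);
 *	}
 */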
/*
 * See kernel/locking/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/locking/mutex-design.rst.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
					unsigned int subclass, struct lockdep_map *nest_lock);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)

#define mutex_lock_nest_lock(lock, nest_lock)				\
do {									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
} while (0)

#define mutex_lock_killable_nest_lock(lock, nest_lock)			\
(									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),	\
	_mutex_lock_killable(lock, 0, &(nest_lock)->dep_map)		\
)

#define mutex_lock_killable_nested(lock, subclass) \
	_mutex_lock_killable(lock, subclass, NULL)
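/*
 * Example: taking many same-class mutexes under one outer lock
 * (illustrative sketch; 'parent'/'child' are hypothetical). The
 * nest_lock annotation tells lockdep that the outer mutex serializes
 * the whole group, so the acquisitions can neither deadlock among
 * themselves nor overflow lockdep's held-lock stack:
 *
 *	mutex_lock(&parent->lock);
 *	list_for_each_entry(child, &parent->children, node)
 *		mutex_lock_nest_lock(&child->lock, &parent->lock);
 */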
#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);
extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);

#define mutex_trylock_nest_lock(lock, nest_lock)		\
(								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),	\
	_mutex_trylock_nest_lock(lock, &(nest_lock)->dep_map)	\
)

#define mutex_trylock(lock)	_mutex_trylock_nest_lock(lock, NULL)
#else
extern int mutex_trylock(struct mutex *lock);
#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#endif
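/*
 * Example of the trylock convention (illustrative sketch; 'obj' is
 * hypothetical): a nonzero return means the lock is now held, zero
 * means it was contended and nothing was taken:
 *
 *	if (!mutex_trylock(&obj->lock))
 *		return -EBUSY;
 *	... critical section ...
 *	mutex_unlock(&obj->lock);
 */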
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
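/*
 * Example: scope-based locking via the guards above (illustrative
 * sketch; 'my_obj' is hypothetical). The mutex is dropped
 * automatically when the scope is left, including on early return:
 *
 *	static int my_update(struct my_obj *obj)
 *	{
 *		guard(mutex)(&obj->lock);
 *
 *		if (!obj->ready)
 *			return -EAGAIN;
 *		obj->count++;
 *		return 0;
 *	}
 */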
extern unsigned long mutex_get_owner(struct mutex *lock);
#endif /* __LINUX_MUTEX_H */