bitmap patches for v6.14

Hi Linus,

Please pull bitmap patches for v6.14. This includes const_true() series
from Vincent Mailhol, another __always_inline rework from Nathan
Chancellor for RISCV, and a couple random fixes from Dr. David Alan
Gilbert and I Hsin Cheng.

Thanks,
Yury

-----BEGIN PGP SIGNATURE-----

iQGzBAABCgAdFiEEi8GdvG6xMhdgpu/4sUSA/TofvsgFAmeUMpEACgkQsUSA/Tof
vsiqEAv/VvtTD6I3Ms3kIl2G1pBP/EFcYQGbwS1PCsX3RX16rinZ2XUDRtjvRy1Y
FA+OsZ2yPtH8G1WRM8YauZsh2cZSCA4xTFadLSZkT8leSWERKaTyJI+PXe2A43IU
d+FV4zYH5JYqV2u9aLHWMO8Voq9nNHZXOYHRu0q53TBFn7V294Lma9oDlK3Wjfur
vSZZU9SKKlMV8Oy6/hZ3tDemUDM1jAGlqrxFb8aXRsTsCpsmlqE1bQdZ+AadjevZ
cVeplB8OCCnqcYV28szIwsJpSzmd5/WBP6jLpeMgBYFGS0JT2USdZ8gsw+Yq/On5
hjxek3cHBKdv0CINk0Ejf4aV0IvoX5S/VRlTjhttzyX68no1DoibDuWJWB42PRWS
frllVOmdkm2DqA0G9mgxtwzBl5UqMFVe5LuVU9E9BZZeDmRZmS3obrUiMzpiqUOs
zkdDvA0uaKgjx2qZADDEFqg1+XdX0A0iPebEv9vLaULXv0+D/PbkClNqIf8p7778
2GWuBLJe
=1hW6
-----END PGP SIGNATURE-----

commit 5fb4088624
Merge tag 'bitmap-for-6.14' of https://github.com:/norov/linux

Pull bitmap updates from Yury Norov:
 "This includes const_true() series from Vincent Mailhol, another
  __always_inline rework from Nathan Chancellor for RISCV, and a couple
  of random fixes from Dr. David Alan Gilbert and I Hsin Cheng"

* tag 'bitmap-for-6.14' of https://github.com:/norov/linux:
  cpumask: Rephrase comments for cpumask_any*() APIs
  cpu: Remove unused init_cpu_online
  riscv: Always inline bitops
  linux/bits.h: simplify GENMASK_INPUT_CHECK()
  compiler.h: add const_true()
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -228,7 +228,7 @@ legacy:
  *
  * This operation may be reordered on other architectures than x86.
  */
-static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(or, __NOP, nr, addr);
 }
@@ -240,7 +240,7 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This operation can be reordered on other architectures other than x86.
  */
-static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(and, __NOT, nr, addr);
 }
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(xor, __NOP, nr, addr);
 }
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(or, __NOP, nr, addr);
 }
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
  * on non x86 architectures, so if you are writing portable code,
  * make sure not to rely on its reordering guarantees.
  */
-static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(and, __NOT, nr, addr);
 }
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(xor, __NOP, nr, addr);
 }
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and provides acquire barrier semantics.
  * It can be used to implement bit locks.
  */
-static inline int arch_test_and_set_bit_lock(
+static __always_inline int arch_test_and_set_bit_lock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void arch_clear_bit_unlock(
+static __always_inline void arch_clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
  * non-atomic property here: it's a lot more instructions and we still have to
  * provide release semantics anyway.
  */
-static inline void arch___clear_bit_unlock(
+static __always_inline void arch___clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	arch_clear_bit_unlock(nr, addr);
 }
 
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
 						volatile unsigned long *addr)
 {
 	unsigned long res;
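Every hunk above makes the same change: "static inline" becomes "static __always_inline". As a rough standalone illustration of what the attribute buys (using a hypothetical set_bit_in_word() helper, not the RISC-V arch_*_bit() code itself), __always_inline removes the compiler's freedom to emit an out-of-line copy, so callers reliably see the body and can fold constant arguments:

/*
 * Sketch only: a hypothetical helper, not the RISC-V arch_*_bit() code.
 * Build with: gcc -O2 -o inline_demo inline_demo.c
 */
#include <stdio.h>

#define __always_inline inline __attribute__((__always_inline__))

/*
 * With a plain "inline" hint the compiler may still emit a real call
 * (e.g. at -O0 or under heavy instrumentation); __always_inline does not
 * leave it that choice.
 */
static __always_inline unsigned long set_bit_in_word(int nr, unsigned long word)
{
	return word | (1UL << nr);
}

int main(void)
{
	/* nr is a compile-time constant here, so the OR folds away. */
	printf("%#lx\n", set_bit_in_word(3, 0x10UL));
	return 0;
}

The kernel change applies the same attribute, so these bitops are always expanded inline regardless of the compiler's inlining heuristics.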
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -20,9 +20,8 @@
  */
 #if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
-#define GENMASK_INPUT_CHECK(h, l) \
-	(BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
-		__is_constexpr((l) > (h)), (l) > (h), 0)))
+#include <linux/compiler.h>
+#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
 #else
 /*
  * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
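To see what the simplified check does, here is a standalone sketch that builds with gcc; the macro bodies below are simplified reproductions for illustration, not the kernel headers themselves:

/* Standalone sketch of the new-style input check; not the kernel header. */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
#define const_true(x)		__builtin_choose_expr(__is_constexpr(x), x, 0)
/* Evaluates to 0, or breaks the build when e is a known-true constant. */
#define BUILD_BUG_ON_ZERO(e)	((int)(sizeof(struct { int:(-!!(e)); })))
#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
#define GENMASK(h, l) \
	(GENMASK_INPUT_CHECK(h, l) + \
	 (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (sizeof(0UL) * 8 - 1 - (h)))))

int main(void)
{
	unsigned long ok = GENMASK(7, 0);	/* 0xff, compiles fine */
	/* unsigned long bad = GENMASK(0, 7); */
	/*	^ swapped arguments: negative bit-field width, build error */
	return ok == 0xffUL ? 0 : 1;
}

When h and l are not compile-time constants, const_true() falls back to false and the check quietly evaluates to 0, exactly as the old __builtin_choose_expr() version did.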
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -307,6 +307,28 @@ static inline void *offset_to_ptr(const int *off)
  */
 #define statically_true(x) (__builtin_constant_p(x) && (x))
 
+/*
+ * Similar to statically_true() but produces a constant expression
+ *
+ * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
+ * which require their input to be a constant expression and for which
+ * statically_true() would otherwise fail.
+ *
+ * This is a trade-off: const_true() requires all its operands to be
+ * compile time constants. Else, it would always returns false even on
+ * the most trivial cases like:
+ *
+ *   true || non_const_var
+ *
+ * On the opposite, statically_true() is able to fold more complex
+ * tautologies and will return true on expressions such as:
+ *
+ *   !(non_const_var * 8 % 4)
+ *
+ * For the general case, statically_true() is better.
+ */
+#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
+
 /*
  * This is needed in functions which generate the stack canary, see
  * arch/x86/kernel/smpboot.c::start_secondary() for an example.
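The behaviour the new comment describes can be checked with a small userspace program; statically_true(), const_true() and __is_constexpr() below are reproduced from the kernel sources, while the main() harness is illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Reproduced from the kernel sources for this sketch. */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
#define statically_true(x)	(__builtin_constant_p(x) && (x))
#define const_true(x)		__builtin_choose_expr(__is_constexpr(x), x, false)

int main(void)
{
	int non_const_var = 42;

	/* const_true() gives up as soon as any operand is non-constant... */
	printf("const_true:      %d\n", const_true(true || non_const_var));	/* 0 */
	/* ...while statically_true() may fold the tautology (1 at -O2, 0 at -O0). */
	printf("statically_true: %d\n", statically_true(!(non_const_var * 8 % 4)));
	/* Both agree when everything is a constant expression. */
	printf("both constant:   %d\n", const_true(3 > 2));			/* 1 */
	return 0;
}

Because const_true() is itself a constant expression, it can sit inside BUILD_BUG_ON_ZERO(), which is exactly how the GENMASK_INPUT_CHECK() change above uses it.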
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -391,7 +391,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
 		for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
 
 /**
- * cpumask_any_but - return a "random" in a cpumask, but not this one.
+ * cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one.
  * @mask: the cpumask to search
  * @cpu: the cpu to ignore.
  *
@@ -411,7 +411,7 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 
 /**
- * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
+ * cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one.
  * @mask1: the first input cpumask
  * @mask2: the second input cpumask
  * @cpu: the cpu to ignore
@@ -840,7 +840,7 @@ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
 }
 
 /**
- * cpumask_any - pick a "random" cpu from *srcp
+ * cpumask_any - pick an arbitrary cpu from *srcp
  * @srcp: the input cpumask
  *
  * Return: >= nr_cpu_ids if no cpus set.
@@ -848,7 +848,7 @@ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
 #define cpumask_any(srcp) cpumask_first(srcp)
 
 /**
- * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2
  * @mask1: the first input cpumask
  * @mask2: the second input cpumask
  *
@@ -1043,7 +1043,6 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
 /* Wrappers for arch boot code to manipulate normally-constant masks */
 void init_cpu_present(const struct cpumask *src);
 void init_cpu_possible(const struct cpumask *src);
-void init_cpu_online(const struct cpumask *src);
 
 #define assign_cpu(cpu, mask, val)	\
 	assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
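For context on the reworded "arbitrary cpu" phrasing, a typical caller looks roughly like the kernel-context sketch below; pick_helper_cpu() is a hypothetical function, not part of this series:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical example of the cpumask_any*() helpers documented above. */
static unsigned int pick_helper_cpu(void)
{
	unsigned int cpu = cpumask_any_but(cpu_online_mask, smp_processor_id());

	/*
	 * Every cpumask_any*() variant returns >= nr_cpu_ids when no CPU
	 * qualifies; "any" means an arbitrary CPU (in practice the first
	 * match), not a randomized one.
	 */
	if (cpu >= nr_cpu_ids)
		cpu = smp_processor_id();	/* fall back to the current CPU */
	return cpu;
}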
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3128,11 +3128,6 @@ void init_cpu_possible(const struct cpumask *src)
 	cpumask_copy(&__cpu_possible_mask, src);
 }
 
-void init_cpu_online(const struct cpumask *src)
-{
-	cpumask_copy(&__cpu_online_mask, src);
-}
-
 void set_cpu_online(unsigned int cpu, bool online)
 {
 	/*