Merge tag 'bitmap-for-6.16-rc1' of https://github.com/norov/linux

Pull bitmap updates from Yury Norov:

 - dead code cleanups for cpumasks and nodemasks (me)

 - fixed-width flavors of GENMASK() and BIT() (Vincent, Lucas and me)

 - FIELD_MODIFY() helper (Luo)

 - for_each_node_with_cpus() optimization (me)

 - bitmap-str fixes (Andy)

* tag 'bitmap-for-6.16-rc1' of https://github.com/norov/linux:
  topology: make for_each_node_with_cpus() O(N)
  bitfield: Add FIELD_MODIFY() helper
  bitmap-str: Add missing header(s)
  bitmap-str: Get rid of 'extern' for function prototypes
  build_bug.h: more user friendly error messages in BUILD_BUG_ON_ZERO()
  test_bits: add tests for BIT_U*()
  test_bits: add tests for GENMASK_U*()
  drm/i915: Convert REG_GENMASK*() to fixed-width GENMASK_U*()
  bits: introduce fixed-type BIT_U*()
  bits: introduce fixed-type GENMASK_U*()
  bits: add comments and newlines to #if, #else and #endif directives
  cpumask: drop cpumask_assign_cpu()
  riscv: switch set_icache_stale_mask() to using non-atomic assign_cpu()
  cpumask: add non-atomic __assign_cpu()
  nodemask: drop nodes_shift
Linus Torvalds 2025-06-03 07:39:23 -07:00
commit 8b2198f037
12 changed files with 134 additions and 153 deletions

View File

@@ -172,7 +172,7 @@ static void set_icache_stale_mask(void)
stale_cpu = cpumask_test_cpu(cpu, mask);
cpumask_setall(mask);
cpumask_assign_cpu(cpu, mask, stale_cpu);
__assign_cpu(cpu, mask, stale_cpu);
put_cpu();
}
#endif
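The new call is the non-atomic helper added later in this series (see the cpumask.h hunk below). A rough, illustration-only sketch of what it amounts to, not part of the diff:

    /* __assign_cpu(cpu, mask, stale_cpu), open-coded: set or clear the
     * bit without the atomic RMW that cpumask_assign_cpu() used */
    if (stale_cpu)
            __cpumask_set_cpu(cpu, mask);
    else
            __cpumask_clear_cpu(cpu, mask);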

View File

@@ -9,76 +9,19 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
/**
* REG_BIT() - Prepare a u32 bit value
* @__n: 0-based bit number
*
* Local wrapper for BIT() to force u32, with compile time checks.
*
* @return: Value with bit @__n set.
/*
* Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
* implementations, for compatibility reasons with previous implementation.
*/
#define REG_BIT(__n) \
((u32)(BIT(__n) + \
BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
((__n) < 0 || (__n) > 31))))
#define REG_GENMASK(high, low) GENMASK_U32(high, low)
#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
/**
* REG_BIT8() - Prepare a u8 bit value
* @__n: 0-based bit number
*
* Local wrapper for BIT() to force u8, with compile time checks.
*
* @return: Value with bit @__n set.
*/
#define REG_BIT8(__n) \
((u8)(BIT(__n) + \
BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
((__n) < 0 || (__n) > 7))))
/**
* REG_GENMASK() - Prepare a continuous u32 bitmask
* @__high: 0-based high bit
* @__low: 0-based low bit
*
* Local wrapper for GENMASK() to force u32, with compile time checks.
*
* @return: Continuous bitmask from @__high to @__low, inclusive.
*/
#define REG_GENMASK(__high, __low) \
((u32)(GENMASK(__high, __low) + \
BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
__is_constexpr(__low) && \
((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
/**
* REG_GENMASK64() - Prepare a continuous u64 bitmask
* @__high: 0-based high bit
* @__low: 0-based low bit
*
* Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
*
* @return: Continuous bitmask from @__high to @__low, inclusive.
*/
#define REG_GENMASK64(__high, __low) \
((u64)(GENMASK_ULL(__high, __low) + \
BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
__is_constexpr(__low) && \
((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
/**
* REG_GENMASK8() - Prepare a continuous u8 bitmask
* @__high: 0-based high bit
* @__low: 0-based low bit
*
* Local wrapper for GENMASK() to force u8, with compile time checks.
*
* @return: Continuous bitmask from @__high to @__low, inclusive.
*/
#define REG_GENMASK8(__high, __low) \
((u8)(GENMASK(__high, __low) + \
BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
__is_constexpr(__low) && \
((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
#define REG_BIT(n) BIT_U32(n)
#define REG_BIT64(n) BIT_U64(n)
#define REG_BIT16(n) BIT_U16(n)
#define REG_BIT8(n) BIT_U8(n)
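With the conversion above, the i915 wrappers are one-line aliases of the generic fixed-width macros. A hedged, illustration-only sketch of what callers still get (hypothetical values, not from the patch):

    u32 mode_mask = REG_GENMASK(7, 4);   /* == GENMASK_U32(7, 4) == 0x000000f0 */
    u8  low_bits  = REG_GENMASK8(3, 0);  /* == GENMASK_U8(3, 0)  == 0x0f */
    u32 enable    = REG_BIT(31);         /* == BIT_U32(31), still u32-typed */
    /* Out-of-range uses such as REG_BIT(32) or REG_GENMASK8(8, 0) now trigger
     * compile-time diagnostics from the generic macros instead of the removed
     * local BUILD_BUG_ON_ZERO() checks. */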
/*
* Local integer constant expression version of is_power_of_2().
@@ -143,35 +86,6 @@
*/
#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
/**
* REG_BIT16() - Prepare a u16 bit value
* @__n: 0-based bit number
*
* Local wrapper for BIT() to force u16, with compile time
* checks.
*
* @return: Value with bit @__n set.
*/
#define REG_BIT16(__n) \
((u16)(BIT(__n) + \
BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
((__n) < 0 || (__n) > 15))))
/**
* REG_GENMASK16() - Prepare a continuous u16 bitmask
* @__high: 0-based high bit
* @__low: 0-based low bit
*
* Local wrapper for GENMASK() to force u16, with compile time
* checks.
*
* @return: Continuous bitmask from @__high to @__low, inclusive.
*/
#define REG_GENMASK16(__high, __low) \
((u16)(GENMASK(__high, __low) + \
BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
__is_constexpr(__low) && \
((__low) < 0 || (__high) > 15 || (__low) > (__high)))))
/**
* REG_FIELD_PREP16() - Prepare a u16 bitfield value

View File

@@ -8,6 +8,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
#include <linux/typecheck.h>
#include <asm/byteorder.h>
/*
@@ -38,8 +39,7 @@
* FIELD_PREP(REG_FIELD_D, 0x40);
*
* Modify:
* reg &= ~REG_FIELD_C;
* reg |= FIELD_PREP(REG_FIELD_C, c);
* FIELD_MODIFY(REG_FIELD_C, &reg, c);
*/
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
@@ -156,6 +156,23 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
/**
* FIELD_MODIFY() - modify a bitfield element
* @_mask: shifted mask defining the field's length and position
* @_reg_p: pointer to the memory that should be updated
* @_val: value to store in the bitfield
*
* FIELD_MODIFY() modifies the set of bits in @_reg_p specified by @_mask,
* by replacing them with the bitfield value passed in as @_val.
*/
#define FIELD_MODIFY(_mask, _reg_p, _val) \
({ \
typecheck_pointer(_reg_p); \
__BF_FIELD_CHECK(_mask, *(_reg_p), _val, "FIELD_MODIFY: "); \
*(_reg_p) &= ~(_mask); \
*(_reg_p) |= (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)); \
})
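A minimal usage sketch for the new helper, with a hypothetical register layout (illustration only, not part of the patch):

    #define CTRL_MODE    GENMASK(3, 0)   /* hypothetical field */
    #define CTRL_IRQ_EN  BIT(4)          /* hypothetical field */

    u32 reg = 0x13;                      /* MODE = 0x3, IRQ_EN set */

    FIELD_MODIFY(CTRL_MODE, &reg, 0x7);  /* reg is now 0x17; IRQ_EN untouched */

    /* the open-coded sequence it replaces: */
    reg &= ~CTRL_MODE;
    reg |= FIELD_PREP(CTRL_MODE, 0x7);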
extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")

View File

@@ -2,12 +2,14 @@
#ifndef __LINUX_BITMAP_STR_H
#define __LINUX_BITMAP_STR_H
#include <linux/types.h>
int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count);
extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count);
int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
loff_t off, size_t count);
int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
loff_t off, size_t count);
int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,

View File

@@ -8,7 +8,6 @@
#include <uapi/linux/kernel.h>
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))

View File

@@ -12,6 +12,7 @@
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
/*
* Create a contiguous bitmask starting at bit position @l and ending at
@@ -19,16 +20,68 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#if !defined(__ASSEMBLY__)
/*
* Missing asm support
*
* GENMASK_U*() and BIT_U*() depend on BITS_PER_TYPE() which relies on sizeof(),
* something not available in asm. Nevertheless, fixed width integers are a C
* concept. Assembly code can rely on the long and long long versions instead.
*/
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/overflow.h>
#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
#else
/*
* Generate a mask for the specified type @t. Additional checks are made to
* guarantee the value returned fits in that type, relying on
* -Wshift-count-overflow compiler check to detect incompatible arguments.
* For example, all these create build errors or warnings:
*
* - GENMASK(15, 20): wrong argument order
* - GENMASK(72, 15): doesn't fit unsigned long
* - GENMASK_U32(33, 15): doesn't fit in a u32
*/
#define GENMASK_TYPE(t, h, l) \
((t)(GENMASK_INPUT_CHECK(h, l) + \
(type_max(t) << (l) & \
type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
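A worked expansion of the arithmetic above for a small case (illustration only):

    /*
     * GENMASK_U8(5, 2), before the final cast to u8:
     *   type_max(u8)                      = 0xff
     *   0xff << 2                         = 0x3fc  (zeroes the bits below l = 2)
     *   0xff >> (8 - 1 - 5) = 0xff >> 2   = 0x3f   (zeroes the bits above h = 5)
     *   0x3fc & 0x3f                      = 0x3c   (bits 5..2 set)
     */
    static_assert(GENMASK_U8(5, 2) == 0x3c);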
/*
* Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
* following examples generate compiler warnings due to -Wshift-count-overflow:
*
* - BIT_U8(8)
* - BIT_U32(-1)
* - BIT_U32(40)
*/
#define BIT_INPUT_CHECK(type, nr) \
BUILD_BUG_ON_ZERO(const_true((nr) >= BITS_PER_TYPE(type)))
#define BIT_TYPE(type, nr) ((type)(BIT_INPUT_CHECK(type, nr) + BIT_ULL(nr)))
#define BIT_U8(nr) BIT_TYPE(u8, nr)
#define BIT_U16(nr) BIT_TYPE(u16, nr)
#define BIT_U32(nr) BIT_TYPE(u32, nr)
#define BIT_U64(nr) BIT_TYPE(u64, nr)
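And the same idea for the fixed-width BIT flavors (illustration only):

    static_assert(BIT_U16(9) == 0x0200);  /* u16-typed, bit 9 set */
    /* BIT_U16(16) fails BIT_INPUT_CHECK()'s BUILD_BUG_ON_ZERO() at build time,
     * since 16 is not a valid bit number for a 16-bit type. */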
#else /* defined(__ASSEMBLY__) */
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
* disable the input check if that is the case.
*/
#define GENMASK_INPUT_CHECK(h, l) 0
#endif
#endif /* !defined(__ASSEMBLY__) */
#define GENMASK(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))

View File

@@ -4,17 +4,17 @@
#include <linux/compiler.h>
#ifdef __CHECKER__
#define BUILD_BUG_ON_ZERO(e) (0)
#else /* __CHECKER__ */
/*
* Force a compilation error if condition is true, but also produce a
* result (of value 0 and type int), so the expression can be used
* e.g. in a structure initializer (or where-ever else comma expressions
* aren't permitted).
*
* Take an error message as an optional second argument. If omitted,
* default to the stringification of the tested expression.
*/
#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
#endif /* __CHECKER__ */
#define BUILD_BUG_ON_ZERO(e, ...) \
__BUILD_BUG_ON_ZERO_MSG(e, ##__VA_ARGS__, #e " is true")
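A hedged sketch of the two calling forms this enables, using a hypothetical wrapper (illustration only):

    /* default message: the stringified expression, here "(nr) >= 32 is true" */
    #define MY_BIT32(nr)      (BUILD_BUG_ON_ZERO((nr) >= 32) + (1u << (nr)))

    /* custom message: the optional second argument replaces the default */
    #define MY_BIT32_MSG(nr) \
            (BUILD_BUG_ON_ZERO((nr) >= 32, "bit number must be below 32") + (1u << (nr)))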
/* Force a compilation error if a constant expression is not a power of 2 */
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \

View File

@@ -192,9 +192,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
})
#ifdef __CHECKER__
#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) (0)
#else /* __CHECKER__ */
#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
#endif /* __CHECKER__ */
/* &a[0] degrades to a pointer: a different type from an array */

View File

@@ -625,22 +625,6 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
/**
* cpumask_assign_cpu - assign a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
* @bool: the value to assign
*/
static __always_inline void cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}
static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
__assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}
/**
* cpumask_test_cpu - test for a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
@@ -1141,6 +1125,9 @@ void init_cpu_possible(const struct cpumask *src);
#define assign_cpu(cpu, mask, val) \
assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
#define __assign_cpu(cpu, mask, val) \
__assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible))
#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))

View File

@@ -39,9 +39,6 @@
* int nodes_full(mask) Is mask full (all bits sets)?
* int nodes_weight(mask) Hamming weight - number of set bits
*
* void nodes_shift_right(dst, src, n) Shift right
* void nodes_shift_left(dst, src, n) Shift left
*
* unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
* unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
* unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
@@ -247,22 +244,6 @@ static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int n
return bitmap_weight(srcp->bits, nbits);
}
#define nodes_shift_right(dst, src, n) \
__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
#define nodes_shift_left(dst, src, n) \
__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/* FIXME: better would be to fix all architectures to never return
> MAX_NUMNODES, then the silly min_ts could be dropped. */
@@ -541,6 +522,7 @@ static __always_inline int node_random(const nodemask_t *maskp)
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
#define for_each_node_with_cpus(node) for_each_node_state(node, N_CPU)
/*
* For nodemask scratch area.

View File

@@ -29,6 +29,7 @@
#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
@@ -39,10 +40,6 @@
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif
#define for_each_node_with_cpus(node) \
for_each_online_node(node) \
if (nr_cpus_node(node))
int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
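The definition removed above re-derived "has CPUs" on every iteration by weighing each online node's cpumask; the replacement in nodemask.h (earlier in this diff) walks the precomputed N_CPU node-state mask, so the loop becomes a plain bitmap scan, i.e. O(N) in the number of nodes. A hypothetical caller, unchanged by this series (illustration only):

    int node, nodes_with_cpus = 0;

    for_each_node_with_cpus(node)        /* now for_each_node_state(node, N_CPU) */
            nodes_with_cpus++;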

View File

@@ -5,6 +5,26 @@
#include <kunit/test.h>
#include <linux/bits.h>
#include <linux/types.h>
#define assert_type(t, x) _Generic(x, t: x, default: 0)
static_assert(assert_type(u8, BIT_U8(0)) == 1u);
static_assert(assert_type(u16, BIT_U16(0)) == 1u);
static_assert(assert_type(u32, BIT_U32(0)) == 1u);
static_assert(assert_type(u64, BIT_U64(0)) == 1ull);
static_assert(assert_type(u8, BIT_U8(7)) == 0x80u);
static_assert(assert_type(u16, BIT_U16(15)) == 0x8000u);
static_assert(assert_type(u32, BIT_U32(31)) == 0x80000000u);
static_assert(assert_type(u64, BIT_U64(63)) == 0x8000000000000000ull);
static_assert(assert_type(unsigned long, GENMASK(31, 0)) == U32_MAX);
static_assert(assert_type(unsigned long long, GENMASK_ULL(63, 0)) == U64_MAX);
static_assert(assert_type(u8, GENMASK_U8(7, 0)) == U8_MAX);
static_assert(assert_type(u16, GENMASK_U16(15, 0)) == U16_MAX);
static_assert(assert_type(u32, GENMASK_U32(31, 0)) == U32_MAX);
static_assert(assert_type(u64, GENMASK_U64(63, 0)) == U64_MAX);
static void genmask_test(struct kunit *test)
@@ -14,11 +34,21 @@ static void genmask_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
KUNIT_EXPECT_EQ(test, 1u, GENMASK_U8(0, 0));
KUNIT_EXPECT_EQ(test, 3u, GENMASK_U16(1, 0));
KUNIT_EXPECT_EQ(test, 0x10000, GENMASK_U32(16, 16));
#ifdef TEST_GENMASK_FAILURES
/* these should fail compilation */
GENMASK(0, 1);
GENMASK(0, 10);
GENMASK(9, 10);
GENMASK_U32(0, 31);
GENMASK_U64(64, 0);
GENMASK_U32(32, 0);
GENMASK_U16(16, 0);
GENMASK_U8(8, 0);
#endif