This pull request contains the following branches:
context_tracking.15.08.24a: Rename context tracking state related
symbols and remove references to "dynticks" in various context
tracking state variables and related helpers; force
context_tracking_enabled_this_cpu() to be inlined to avoid
leaving a noinstr section.
csd.lock.15.08.24a: Enhance CSD-lock diagnostic reports; add an API
to provide an indication of an ongoing CSD-lock stall.
nocb.09.09.24a: Update and simplify RCU nocb code to handle
(de-)offloading of callbacks only for offline CPUs; fix RT
throttling hrtimer being armed from offline CPU.
rcutorture.14.08.24a: Remove redundant rcu_torture_ops get_gp_completed
fields; add SRCU ->same_gp_state and ->get_comp_state
functions; add generic test for NUM_ACTIVE_*RCU_POLL* for
testing RCU and SRCU polled grace periods; add CFcommon.arch
for arch-specific Kconfig options; print number of update types
in rcu_torture_write_types();
add rcutree.nohz_full_patience_delay testing to the TREE07
scenario; add a stall_cpu_repeat module parameter to test
repeated CPU stalls; add argument to limit number of CPUs a
guest OS can use in torture.sh.
rcustall.09.09.24a: Abbreviate RCU CPU stall warnings during CSD-lock
stalls; allow dump_cpu_task() to be called without disabling
preemption; defer printing stall-warning backtrace when holding
rcu_node lock.
srcu.12.08.24a: Make SRCU gp seq wrap-around faster; add KCSAN checks
for concurrent updates to ->srcu_n_exp_nodelay and
->reschedule_count, which are used in heuristics governing
auto-expediting of normal SRCU grace periods and
grace-period-state-machine delays; mark idle SRCU-barrier
callbacks to help identify stuck SRCU-barrier callbacks.
rcu.tasks.14.08.24a: Remove RCU Tasks Rude asynchronous APIs as they
are no longer used; stop testing RCU Tasks Rude asynchronous
APIs; fix access to non-existent percpu regions; check
processor-ID assumptions during chosen CPU calculation for
callback enqueuing; update description of rtp->tasks_gp_seq
grace-period sequence number; add rcu_barrier_cb_is_done()
to identify whether a given rcu_barrier callback is stuck;
mark idle Tasks-RCU-barrier callbacks; add
*torture_stats_print() functions to print detailed
diagnostics for Tasks-RCU variants; capture start time of
rcu_barrier_tasks*() operation to help distinguish a hung
barrier operation from a long series of barrier operations.
rcu_scaling_tests.15.08.24a:
refscale: Add a TINY scenario to support tests of Tiny RCU
and Tiny SRCU; optimize process_durations() operation;
rcuscale: Dump stacks of stalled rcu_scale_writer() instances;
dump grace-period statistics when rcu_scale_writer() stalls;
mark idle RCU-barrier callbacks to identify stuck RCU-barrier
callbacks; print detailed grace-period and barrier diagnostics
on rcu_scale_writer() hangs for Tasks-RCU variants; warn if
the async module parameter is specified for RCU implementations,
such as RCU Tasks Rude, that do not have async primitives;
make all writer tasks report upon hang; tolerate repeated
GFP_KERNEL failure in rcu_scale_writer(); use special allocator
for rcu_scale_writer(); NULL out top-level pointers to heap
memory to avoid double-free bugs on modprobe failures; maintain
per-task instead of per-CPU callbacks count to avoid any issues
with migration of either tasks or callbacks; constify struct
ref_scale_ops.
fixes.12.08.24a: Use system_unbound_wq for kfree_rcu work to avoid
disturbing isolated CPUs.
misc.11.08.24a: Warn on unexpected rcu_state.srs_done_tail state;
better define "atomic" for list_replace_rcu() and
hlist_replace_rcu() routines; annotate struct
kvfree_rcu_bulk_data with __counted_by().
-----BEGIN PGP SIGNATURE-----
iHUEABYIAB0WIQSi2tPIQIc2VEtjarIAHS7/6Z0wpQUCZt8+8wAKCRAAHS7/6Z0w
pTqoAPwPN//tlEoJx2PRs6t0q+nD1YNvnZawPaRmdzgdM8zJogD+PiSN+XhqRr80
jzyvMDU4Aa0wjUNP3XsCoaCxo7L/lQk=
=bZ9z
-----END PGP SIGNATURE-----
Merge tag 'rcu.release.v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux
Pull RCU updates from Neeraj Upadhyay:
"Context tracking:
- rename context tracking state related symbols and remove references
to "dynticks" in various context tracking state variables and
related helpers
- force context_tracking_enabled_this_cpu() to be inlined to avoid
leaving a noinstr section
CSD lock:
- enhance CSD-lock diagnostic reports
- add an API to provide an indication of an ongoing CSD-lock stall
nocb:
- update and simplify RCU nocb code to handle (de-)offloading of
callbacks only for offline CPUs
- fix RT throttling hrtimer being armed from offline CPU
rcutorture:
- remove redundant rcu_torture_ops get_gp_completed fields
- add SRCU ->same_gp_state and ->get_comp_state functions
- add generic test for NUM_ACTIVE_*RCU_POLL* for testing RCU and SRCU
polled grace periods (see the polled grace-period sketch after the
commit list below)
- add CFcommon.arch for arch-specific Kconfig options
- print number of update types in rcu_torture_write_types()
- add rcutree.nohz_full_patience_delay testing to the TREE07 scenario
- add a stall_cpu_repeat module parameter to test repeated CPU stalls
- add argument to limit number of CPUs a guest OS can use in
torture.sh
rcustall:
- abbreviate RCU CPU stall warnings during CSD-lock stalls
- allow dump_cpu_task() to be called without disabling preemption
- defer printing stall-warning backtrace when holding rcu_node lock
srcu:
- make SRCU gp seq wrap-around faster
- add KCSAN checks for concurrent updates to ->srcu_n_exp_nodelay and
->reschedule_count, which are used in heuristics governing
auto-expediting of normal SRCU grace periods and
grace-period-state-machine delays
- mark idle SRCU-barrier callbacks to help identify stuck
SRCU-barrier callbacks
rcu tasks:
- remove RCU Tasks Rude asynchronous APIs as they are no longer used
- stop testing RCU Tasks Rude asynchronous APIs
- fix access to non-existent percpu regions
- check processor-ID assumptions during chosen CPU calculation for
callback enqueuing
- update description of rtp->tasks_gp_seq grace-period sequence
number
- add rcu_barrier_cb_is_done() to identify whether a given
rcu_barrier callback is stuck
- mark idle Tasks-RCU-barrier callbacks
- add *torture_stats_print() functions to print detailed diagnostics
for Tasks-RCU variants
- capture start time of rcu_barrier_tasks*() operation to help
distinguish a hung barrier operation from a long series of barrier
operations
refscale:
- add a TINY scenario to support tests of Tiny RCU and Tiny
SRCU
- optimize process_durations() operation
rcuscale:
- dump stacks of stalled rcu_scale_writer() instances and
grace-period statistics when rcu_scale_writer() stalls
- mark idle RCU-barrier callbacks to identify stuck RCU-barrier
callbacks
- print detailed grace-period and barrier diagnostics on
rcu_scale_writer() hangs for Tasks-RCU variants
- warn if the async module parameter is specified for RCU
implementations, such as RCU Tasks Rude, that do not have async
primitives
- make all writer tasks report upon hang
- tolerate repeated GFP_KERNEL failure in rcu_scale_writer()
- use special allocator for rcu_scale_writer()
- NULL out top-level pointers to heap memory to avoid double-free
bugs on modprobe failures
- maintain per-task instead of per-CPU callbacks count to avoid any
issues with migration of either tasks or callbacks
- constify struct ref_scale_ops
Fixes:
- use system_unbound_wq for kfree_rcu work to avoid disturbing
isolated CPUs
Misc:
- warn on unexpected rcu_state.srs_done_tail state
- better define "atomic" for list_replace_rcu() and
hlist_replace_rcu() routines
- annotate struct kvfree_rcu_bulk_data with __counted_by()"
* tag 'rcu.release.v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux: (90 commits)
rcu: Defer printing stall-warning backtrace when holding rcu_node lock
rcu/nocb: Remove superfluous memory barrier after bypass enqueue
rcu/nocb: Conditionally wake up rcuo if not already waiting on GP
rcu/nocb: Fix RT throttling hrtimer armed from offline CPU
rcu/nocb: Simplify (de-)offloading state machine
context_tracking: Tag context_tracking_enabled_this_cpu() __always_inline
context_tracking, rcu: Rename rcu_dyntick trace event into rcu_watching
rcu: Update stray documentation references to rcu_dynticks_eqs_{enter, exit}()
rcu: Rename rcu_momentary_dyntick_idle() into rcu_momentary_eqs()
rcu: Rename rcu_implicit_dynticks_qs() into rcu_watching_snap_recheck()
rcu: Rename dyntick_save_progress_counter() into rcu_watching_snap_save()
rcu: Rename struct rcu_data .exp_dynticks_snap into .exp_watching_snap
rcu: Rename struct rcu_data .dynticks_snap into .watching_snap
rcu: Rename rcu_dynticks_zero_in_eqs() into rcu_watching_zero_in_eqs()
rcu: Rename rcu_dynticks_in_eqs_since() into rcu_watching_snap_stopped_since()
rcu: Rename rcu_dynticks_in_eqs() into rcu_watching_snap_in_eqs()
rcu: Rename rcu_dynticks_eqs_online() into rcu_watching_online()
context_tracking, rcu: Rename rcu_dynticks_curr_cpu_in_eqs() into rcu_is_watching_curr_cpu()
context_tracking, rcu: Rename rcu_dynticks_task*() into rcu_task*()
refscale: Constify struct ref_scale_ops
...
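
As a concrete illustration of the rcutorture item above, NUM_ACTIVE_*RCU_POLL*
relates to RCU's polled grace-period interface (start_poll_synchronize_rcu()
and poll_state_synchronize_rcu() are the real API, declared in
<linux/rcupdate.h>; the surrounding code is an editorial sketch only):

	static unsigned long gp_cookie;

	static void example_start_gp(void)
	{
		/* Start a grace period if needed and snapshot a cookie. */
		gp_cookie = start_poll_synchronize_rcu();
	}

	static bool example_gp_elapsed(void)
	{
		/* True once a full grace period has elapsed past the cookie. */
		return poll_state_synchronize_rcu(gp_cookie);
	}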

include/linux/context_tracking.h (173 lines, 4.4 KiB, C):
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>

#ifdef CONFIG_CONTEXT_TRACKING_USER
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled. */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);

extern void user_enter_callable(void);
extern void user_exit_callable(void);

static inline void user_enter(void)
{
	if (context_tracking_enabled())
		ct_user_enter(CT_STATE_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		ct_user_exit(CT_STATE_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CT_STATE_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CT_STATE_USER);
}
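
/*
 * Editorial usage sketch (not part of the upstream header): the irqoff
 * variants are what generic entry code pairs on the user/kernel boundary,
 * roughly:
 *
 *	user_exit_irqoff();	// syscall/exception entry: user -> kernel
 *	...do the kernel-side work...
 *	user_enter_irqoff();	// return to user: kernel -> user
 *
 * Both run with interrupts disabled and are __always_inline so they can be
 * called from noinstr entry code without leaving the noinstr section.
 */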

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = __ct_state();
	if (prev_ctx != CT_STATE_KERNEL)
		ct_user_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CT_STATE_KERNEL)
			ct_user_enter(prev_ctx);
	}
}
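
/*
 * Editorial usage sketch (illustrative, not upstream code): on archs
 * without CONTEXT_TRACKING_USER_OFFSTACK, an exception handler saves and
 * restores the previous context around its body:
 *
 *	enum ctx_state prev = exception_enter();  // leave user context if needed
 *	handle_exception(regs);                   // hypothetical handler
 *	exception_exit(prev);                     // re-enter the saved context
 */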

static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CT_STATE_GUEST);

	return context_tracking_enabled_this_cpu();
}

static __always_inline bool context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CT_STATE_GUEST);

	return context_tracking_enabled_this_cpu();
}
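
/*
 * Editorial note: the guest helpers return context_tracking_enabled_this_cpu()
 * so a caller such as KVM can tell whether context tracking handled the
 * bookkeeping on this CPU. A hedged sketch of the call-site shape:
 *
 *	if (!context_tracking_guest_enter())
 *		do_fallback_bookkeeping();	// hypothetical fallback path
 *
 * Keeping context_tracking_enabled_this_cpu() __always_inline (one of the
 * context-tracking changes in this merge) avoids leaving the caller's
 * noinstr section.
 */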

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))

#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static __always_inline bool context_tracking_guest_exit(void) { return false; }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);

/*
 * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
 *
 * Note that this returns the actual boolean data (watching / not watching),
 * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
 * context_tracking.state.
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_is_watching_curr_cpu(void)
{
	return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}

/*
 * Increment the current CPU's context_tracking structure's ->state field
 * with ordering. Return the new value.
 */
static __always_inline unsigned long ct_state_inc(int incby)
{
	return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
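
/*
 * Editorial note (an assumption based on context_tracking_state.h): the
 * RCU-watching counter lives in the bits above the context-state field, so
 * ct_state_inc(CT_RCU_WATCHING) bumps that counter by one, flipping the
 * CT_RCU_WATCHING bit that rcu_is_watching_curr_cpu() tests above. That is
 * how warn_rcu_enter()/warn_rcu_exit() below force RCU to watch and then
 * restore the prior state.
 */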

static __always_inline bool warn_rcu_enter(void)
{
	bool ret = false;

	/*
	 * Horrible hack to shut up recursive RCU isn't watching fail since
	 * lots of the actual reporting also relies on RCU.
	 */
	preempt_disable_notrace();
	if (!rcu_is_watching_curr_cpu()) {
		ret = true;
		ct_state_inc(CT_RCU_WATCHING);
	}

	return ret;
}

static __always_inline void warn_rcu_exit(bool rcu)
{
	if (rcu)
		ct_state_inc(CT_RCU_WATCHING);
	preempt_enable_notrace();
}
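
/*
 * Editorial usage sketch (illustrative): warning/reporting paths that may
 * run where RCU is not watching bracket themselves with this pair:
 *
 *	bool rcu = warn_rcu_enter();	// temporarily force RCU to watch
 *	print_the_warning();		// hypothetical reporting code
 *	warn_rcu_exit(rcu);		// restore state, re-enable preemption
 */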

#else
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }

static __always_inline bool warn_rcu_enter(void) { return false; }
static __always_inline void warn_rcu_exit(bool rcu) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */

#endif