Merge branches 'rcu/torture', 'rcu/fixes', 'rcu/docs', 'rcu/refscale', 'rcu/tasks' and 'rcu/stall' into rcu/next
rcu/torture:  RCU torture, locktorture and generic torture infrastructure
rcu/fixes:    Generic and misc fixes
rcu/docs:     RCU documentation updates
rcu/refscale: RCU reference scalability test updates
rcu/tasks:    RCU tasks updates
rcu/stall:    Stall detection updates
@@ -181,7 +181,7 @@ operations is carried out at several levels:
 of this wait (or series of waits, as the case may be) is to permit a
 concurrent CPU-hotplug operation to complete.
 #. In the case of RCU-sched, one of the last acts of an outgoing CPU is
-to invoke ``rcu_report_dead()``, which reports a quiescent state for
+to invoke ``rcutree_report_cpu_dead()``, which reports a quiescent state for
 that CPU. However, this is likely paranoia-induced redundancy.

 +-----------------------------------------------------------------------+
@@ -564,15 +564,6 @@
 font-size="192"
 id="text202-7-9-6"
 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcutree_migrate_callbacks()</text>
-<text
-xml:space="preserve"
-x="8335.4873"
-y="5357.1006"
-font-style="normal"
-font-weight="bold"
-font-size="192"
-id="text202-7-9-6-0"
-style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_migrate_callbacks()</text>
 <text
 xml:space="preserve"
 x="8768.4678"
[SVG figure updated; size unchanged at 23 KiB]
@@ -1135,7 +1135,7 @@
 font-weight="bold"
 font-size="192"
 id="text202-7-5-3-27-6-5"
-style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_report_dead()</text>
+style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcutree_report_cpu_dead()</text>
 <text
 xml:space="preserve"
 x="3745.7725"
@@ -1256,7 +1256,7 @@
 font-style="normal"
 y="3679.27"
 x="-3804.9949"
-xml:space="preserve">rcu_cpu_starting()</text>
+xml:space="preserve">rcutree_report_cpu_starting()</text>
 <g
 style="fill:none;stroke-width:0.025in"
 id="g3107-7-5-0"
[SVG figure updated; size unchanged at 50 KiB]
@@ -1446,15 +1446,6 @@
 font-size="192"
 id="text202-7-9-6"
 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcutree_migrate_callbacks()</text>
-<text
-xml:space="preserve"
-x="8335.4873"
-y="5357.1006"
-font-style="normal"
-font-weight="bold"
-font-size="192"
-id="text202-7-9-6-0"
-style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_migrate_callbacks()</text>
 <text
 xml:space="preserve"
 x="8768.4678"
@@ -3274,7 +3265,7 @@
 font-weight="bold"
 font-size="192"
 id="text202-7-5-3-27-6-5"
-style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_report_dead()</text>
+style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcutree_report_cpu_dead()</text>
 <text
 xml:space="preserve"
 x="3745.7725"
@@ -3395,7 +3386,7 @@
 font-style="normal"
 y="3679.27"
 x="-3804.9949"
-xml:space="preserve">rcu_cpu_starting()</text>
+xml:space="preserve">rcutree_report_cpu_starting()</text>
 <g
 style="fill:none;stroke-width:0.025in"
 id="g3107-7-5-0"
[SVG figure updated; size 209 KiB before, 208 KiB after]
@@ -607,7 +607,7 @@
 font-weight="bold"
 font-size="192"
 id="text202-7-5-3-27-6"
-style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_report_dead()</text>
+style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcutree_report_cpu_dead()</text>
 <text
 xml:space="preserve"
 x="3745.7725"
@@ -728,7 +728,7 @@
 font-style="normal"
 y="3679.27"
 x="-3804.9949"
-xml:space="preserve">rcu_cpu_starting()</text>
+xml:space="preserve">rcutree_report_cpu_starting()</text>
 <g
 style="fill:none;stroke-width:0.025in"
 id="g3107-7-5-0"
[SVG figure updated; size unchanged at 28 KiB]
@@ -1955,12 +1955,12 @@ if offline CPUs block an RCU grace period for too long.

 An offline CPU's quiescent state will be reported either:

-1. As the CPU goes offline using RCU's hotplug notifier (rcu_report_dead()).
+1. As the CPU goes offline using RCU's hotplug notifier (rcutree_report_cpu_dead()).
 2. When grace period initialization (rcu_gp_init()) detects a
 race either with CPU offlining or with a task unblocking on a leaf
 ``rcu_node`` structure whose CPUs are all offline.

-The CPU-online path (rcu_cpu_starting()) should never need to report
+The CPU-online path (rcutree_report_cpu_starting()) should never need to report
 a quiescent state for an offline CPU. However, as a debugging measure,
 it does emit a warning if a quiescent state was not already reported
 for that CPU.
@@ -8,6 +8,15 @@ One of the most common uses of RCU is protecting read-mostly linked lists
 that all of the required memory ordering is provided by the list macros.
 This document describes several list-based RCU use cases.

+When iterating a list while holding the rcu_read_lock(), writers may
+modify the list. The reader is guaranteed to see all of the elements
+which were added to the list before they acquired the rcu_read_lock()
+and are still on the list when they drop the rcu_read_unlock().
+Elements which are added to, or removed from the list may or may not
+be seen. If the writer calls list_replace_rcu(), the reader may see
+either the old element or the new element; they will not see both,
+nor will they see neither.
+

 Example 1: Read-mostly list: Deferred Destruction
 -------------------------------------------------
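As a concrete illustration of the guarantee described in the added paragraph, here is a minimal reader sketch (not part of this patch; struct foo, foo_list and foo_present() are made-up names): elements added before rcu_read_lock() and still present at rcu_read_unlock() are guaranteed to be seen, while concurrently added or removed elements may or may not be.

    struct foo {
    	struct list_head list;
    	int key;
    };

    static LIST_HEAD(foo_list);	/* hypothetical list, protected by RCU plus an update-side lock */

    static bool foo_present(int key)
    {
    	struct foo *p;
    	bool found = false;

    	rcu_read_lock();
    	list_for_each_entry_rcu(p, &foo_list, list) {
    		if (p->key == key) {
    			found = true;
    			break;
    		}
    	}
    	rcu_read_unlock();
    	return found;
    }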
@@ -59,8 +59,8 @@ experiment with should focus on Section 2. People who prefer to start
 with example uses should focus on Sections 3 and 4. People who need to
 understand the RCU implementation should focus on Section 5, then dive
 into the kernel source code. People who reason best by analogy should
-focus on Section 6. Section 7 serves as an index to the docbook API
-documentation, and Section 8 is the traditional answer key.
+focus on Section 6 and 7. Section 8 serves as an index to the docbook
+API documentation, and Section 9 is the traditional answer key.

 So, start with the section that makes the most sense to you and your
 preferred method of learning. If you need to know everything about
@@ -4820,6 +4820,13 @@
 			Set maximum number of finished RCU callbacks to
 			process in one batch.

+	rcutree.do_rcu_barrier= [KNL]
+			Request a call to rcu_barrier(). This is
+			throttled so that userspace tests can safely
+			hammer on the sysfs variable if they so choose.
+			If triggered before the RCU grace-period machinery
+			is fully active, this will error out with EAGAIN.
+
 	rcutree.dump_tree= [KNL]
 			Dump the structure of the rcu_node combining tree
 			out at early boot. This is used for diagnostic
@@ -5473,6 +5480,12 @@
 			test until boot completes in order to avoid
 			interference.

+	refscale.lookup_instances= [KNL]
+			Number of data elements to use for the forms of
+			SLAB_TYPESAFE_BY_RCU testing. A negative number
+			is negated and multiplied by nr_cpu_ids, while
+			zero specifies nr_cpu_ids.
+
 	refscale.loops= [KNL]
 			Set the number of loops over the synchronization
 			primitive under test. Increasing this number
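The lookup_instances arithmetic described above is small enough to restate as code. The helper below is purely illustrative (it is not how the refscale module is necessarily structured); it simply encodes the documented semantics:

    /* Illustrative only: effective element count for refscale.lookup_instances. */
    static long effective_lookup_instances(long lookup_instances)
    {
    	if (lookup_instances < 0)
    		return -lookup_instances * nr_cpu_ids;	/* negative: scale by CPU count */
    	if (lookup_instances == 0)
    		return nr_cpu_ids;			/* zero: one element per CPU */
    	return lookup_instances;			/* positive: used as-is */
    }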
@@ -215,7 +215,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 if (system_uses_irq_prio_masking())
 init_gic_priority_masking();

-rcu_cpu_starting(cpu);
+rcutree_report_cpu_starting(cpu);
 trace_hardirqs_off();

 /*
@@ -401,7 +401,7 @@ void __noreturn cpu_die_early(void)

 /* Mark this CPU absent */
 set_cpu_present(cpu, 0);
-rcu_report_dead(cpu);
+rcutree_report_cpu_dead();

 if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
 update_cpu_boot_status(CPU_KILL_ME);
@@ -1629,7 +1629,7 @@ void start_secondary(void *unused)

 smp_store_cpu_info(cpu);
 set_dec(tb_ticks_per_jiffy);
-rcu_cpu_starting(cpu);
+rcutree_report_cpu_starting(cpu);
 cpu_callin_map[cpu] = 1;

 if (smp_ops->setup_cpu)
@@ -898,7 +898,7 @@ static void smp_start_secondary(void *cpuvoid)
 S390_lowcore.restart_flags = 0;
 restore_access_regs(S390_lowcore.access_regs_save_area);
 cpu_init();
-rcu_cpu_starting(cpu);
+rcutree_report_cpu_starting(cpu);
 init_cpu_timer();
 vtime_init();
 vdso_getcpu_init();
@@ -288,7 +288,7 @@ static void notrace start_secondary(void *unused)

 cpu_init();
 fpu__init_cpu();
-rcu_cpu_starting(raw_smp_processor_id());
+rcutree_report_cpu_starting(raw_smp_processor_id());
 x86_cpuinit.early_percpu_clock_init();

 ap_starting();
@@ -566,7 +566,7 @@ enum
 *
 * _ RCU:
 * 1) rcutree_migrate_callbacks() migrates the queue.
-* 2) rcu_report_dead() reports the final quiescent states.
+* 2) rcutree_report_cpu_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 */
include/linux/rcu_notifier.h (new file, 32 lines)
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update notifiers, initially RCU CPU stall notifier.
+ * Separate from rcupdate.h to avoid #include loops.
+ *
+ * Copyright (C) 2023 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_RCU_NOTIFIER_H
+#define __LINUX_RCU_NOTIFIER_H
+
+// Actions for RCU CPU stall notifier calls.
+#define RCU_STALL_NOTIFY_NORM 1
+#define RCU_STALL_NOTIFY_EXP 2
+
+#ifdef CONFIG_RCU_STALL_COMMON
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+int rcu_stall_chain_notifier_register(struct notifier_block *n);
+int rcu_stall_chain_notifier_unregister(struct notifier_block *n);
+
+#else // #ifdef CONFIG_RCU_STALL_COMMON
+
+// No RCU CPU stall warnings in Tiny RCU.
+static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; }
+static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; }
+
+#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON
+
+#endif /* __LINUX_RCU_NOTIFIER_H */
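The rcutorture change later in this merge registers exactly this kind of notifier; a stripped-down sketch of a client of the new header might look as follows (the handler and notifier_block names are placeholders, and interpreting the void *data argument as a stall duration follows the rcutorture handler shown further down):

    #include <linux/notifier.h>
    #include <linux/rcu_notifier.h>

    static int my_rcu_stall_nf(struct notifier_block *nb, unsigned long action, void *data)
    {
    	/* action: RCU_STALL_NOTIFY_NORM or RCU_STALL_NOTIFY_EXP. */
    	pr_info("RCU stall notified: action=%lu duration=%lu\n",
    		action, (unsigned long)data);
    	return NOTIFY_OK;
    }

    static struct notifier_block my_rcu_stall_block = {
    	.notifier_call = my_rcu_stall_nf,
    };

    /* In module init/exit, checking the return values:
     *   rcu_stall_chain_notifier_register(&my_rcu_stall_block);
     *   rcu_stall_chain_notifier_unregister(&my_rcu_stall_block);
     */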
@@ -122,8 +122,6 @@ static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
 void rcu_init(void);
 extern int rcu_scheduler_active;
 void rcu_sched_clock_irq(int user);
-void rcu_report_dead(unsigned int cpu);
-void rcutree_migrate_callbacks(int cpu);

 #ifdef CONFIG_TASKS_RCU_GENERIC
 void rcu_init_tasks_generic(void);
@@ -171,6 +171,6 @@ static inline void rcu_all_qs(void) { barrier(); }
 #define rcutree_offline_cpu NULL
 #define rcutree_dead_cpu NULL
 #define rcutree_dying_cpu NULL
-static inline void rcu_cpu_starting(unsigned int cpu) { }
+static inline void rcutree_report_cpu_starting(unsigned int cpu) { }

 #endif /* __LINUX_RCUTINY_H */
@@ -37,7 +37,6 @@ void synchronize_rcu_expedited(void);
 void kvfree_call_rcu(struct rcu_head *head, void *ptr);

 void rcu_barrier(void);
-bool rcu_eqs_special_set(int cpu);
 void rcu_momentary_dyntick_idle(void);
 void kfree_rcu_scheduler_running(void);
 bool rcu_gp_might_be_stalled(void);
@@ -111,9 +110,21 @@ void rcu_all_qs(void);
 /* RCUtree hotplug events */
 int rcutree_prepare_cpu(unsigned int cpu);
 int rcutree_online_cpu(unsigned int cpu);
-int rcutree_offline_cpu(unsigned int cpu);
+void rcutree_report_cpu_starting(unsigned int cpu);

+#ifdef CONFIG_HOTPLUG_CPU
 int rcutree_dead_cpu(unsigned int cpu);
 int rcutree_dying_cpu(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+#else
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+#define rcutree_offline_cpu NULL
+#endif
+
+void rcutree_migrate_callbacks(int cpu);
+
+/* Called from hotplug and also arm64 early secondary boot failure */
+void rcutree_report_cpu_dead(void);
+
 #endif /* __LINUX_RCUTREE_H */
|
||||||
size_t ksize(const void *objp);
|
size_t ksize(const void *objp);
|
||||||
|
|
||||||
#ifdef CONFIG_PRINTK
|
#ifdef CONFIG_PRINTK
|
||||||
bool kmem_valid_obj(void *object);
|
bool kmem_dump_obj(void *object);
|
||||||
void kmem_dump_obj(void *object);
|
#else
|
||||||
|
static inline bool kmem_dump_obj(void *object) { return false; }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
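With the hunk above, kmem_dump_obj() itself reports whether the pointer was a slab object, so the separate kmem_valid_obj() pre-check goes away (and the !CONFIG_PRINTK stub simply returns false). A hedged sketch of the resulting calling pattern, with a hypothetical caller; the real in-tree user added by this merge is debug_rcu_head_callback() in the kernel/rcu hunks below:

    static void dump_if_slab(void *p)
    {
    	if (!kmem_dump_obj(p))		/* false: not slab-allocated, nothing printed */
    		pr_info("%px is not a slab object\n", p);
    }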
kernel/cpu.c (13 changed lines)
@@ -1372,7 +1372,14 @@ static int takedown_cpu(unsigned int cpu)
 cpuhp_bp_sync_dead(cpu);

 tick_cleanup_dead_cpu(cpu);
+
+/*
+ * Callbacks must be re-integrated right away to the RCU state machine.
+ * Otherwise an RCU callback could block a further teardown function
+ * waiting for its completion.
+ */
 rcutree_migrate_callbacks(cpu);

 return 0;
 }

@@ -1388,10 +1395,10 @@ void cpuhp_report_idle_dead(void)
 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

 BUG_ON(st->state != CPUHP_AP_OFFLINE);
-rcu_report_dead(smp_processor_id());
+rcutree_report_cpu_dead();
 st->state = CPUHP_AP_IDLE_DEAD;
 /*
- * We cannot call complete after rcu_report_dead() so we delegate it
+ * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
  * to an online cpu.
  */
 smp_call_function_single(cpumask_first(cpu_online_mask),

@@ -1617,7 +1624,7 @@ void notify_cpu_starting(unsigned int cpu)
 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

-rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
+rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
 cpumask_set_cpu(cpu, &cpus_booted_once_mask);

 /*
@@ -10,6 +10,7 @@
 #ifndef __LINUX_RCU_H
 #define __LINUX_RCU_H

+#include <linux/slab.h>
 #include <trace/events/rcu.h>

 /*
@@ -248,6 +249,12 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

+static inline void debug_rcu_head_callback(struct rcu_head *rhp)
+{
+	if (unlikely(!rhp->func))
+		kmem_dump_obj(rhp);
+}
+
 extern int rcu_cpu_stall_suppress_at_boot;

 static inline bool rcu_stall_is_suppressed_at_boot(void)
@@ -650,4 +657,10 @@ static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
 bool rcu_cpu_beenfullyonline(int cpu);
 #endif

+#ifdef CONFIG_RCU_STALL_COMMON
+int rcu_stall_notifier_call_chain(unsigned long val, void *v);
+#else // #ifdef CONFIG_RCU_STALL_COMMON
+static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
+#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON
+
 #endif /* __LINUX_RCU_H */
@@ -368,7 +368,7 @@ bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
 smp_mb(); /* Ensure counts are updated before callback is entrained. */
 rhp->next = NULL;
 for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
-	if (rsclp->tails[i] != rsclp->tails[i - 1])
+	if (!rcu_segcblist_segempty(rsclp, i))
 		break;
 rcu_segcblist_inc_seglen(rsclp, i);
 WRITE_ONCE(*rsclp->tails[i], rhp);

@@ -551,7 +551,7 @@ bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
 * as their ->gp_seq[] grace-period completion sequence number.
 */
 for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
-	if (rsclp->tails[i] != rsclp->tails[i - 1] &&
+	if (!rcu_segcblist_segempty(rsclp, i) &&
 	    ULONG_CMP_LT(rsclp->gp_seq[i], seq))
 		break;

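Both hunks replace an open-coded comparison of adjacent tail pointers with rcu_segcblist_segempty(). For readers unfamiliar with that helper, it presumably amounts to something like the sketch below (an assumption about its shape, not a quote from this merge); the point of switching to it is that it also gives a correct answer for the RCU_DONE_TAIL segment, whose emptiness cannot be judged from two adjacent ->tails[] entries:

    /* Sketch (assumed): is segment seg of rsclp empty? */
    static bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg)
    {
    	if (seg == RCU_DONE_TAIL)
    		return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
    	return rsclp->tails[seg - 1] == rsclp->tails[seg];
    }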
@@ -21,6 +21,7 @@
 #include <linux/spinlock.h>
 #include <linux/smp.h>
 #include <linux/rcupdate_wait.h>
+#include <linux/rcu_notifier.h>
 #include <linux/interrupt.h>
 #include <linux/sched/signal.h>
 #include <uapi/linux/sched/types.h>

@@ -2428,6 +2429,16 @@ static int rcutorture_booster_init(unsigned int cpu)
 	return 0;
 }

+static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
+{
+	pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block rcu_torture_stall_block = {
+	.notifier_call = rcu_torture_stall_nf,
+};
+
 /*
  * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
  * induces a CPU stall for the time specified by stall_cpu.

@@ -2435,9 +2446,14 @@ static int rcutorture_booster_init(unsigned int cpu)
 static int rcu_torture_stall(void *args)
 {
 	int idx;
+	int ret;
 	unsigned long stop_at;

 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
+	ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
+	if (ret)
+		pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
+			__func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
 	if (stall_cpu_holdoff > 0) {
 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);

@@ -2481,6 +2497,11 @@ static int rcu_torture_stall(void *args)
 		cur_ops->readunlock(idx);
 	}
 	pr_alert("%s end.\n", __func__);
+	if (!ret) {
+		ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
+		if (ret)
+			pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
+	}
 	torture_shutdown_absorb("rcu_torture_stall");
 	while (!kthread_should_stop())
 		schedule_timeout_interruptible(10 * HZ);
@@ -655,12 +655,12 @@ retry:
 		goto retry;
 	}
 	un_delay(udl, ndl);
+	b = READ_ONCE(rtsp->a);
 	// Remember, seqlock read-side release can fail.
 	if (!rts_release(rtsp, start)) {
 		rcu_read_unlock();
 		goto retry;
 	}
-	b = READ_ONCE(rtsp->a);
 	WARN_ONCE(a != b, "Re-read of ->a changed from %u to %u.\n", a, b);
 	b = rtsp->b;
 	rcu_read_unlock();

@@ -1025,8 +1025,8 @@ static void
 ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
 {
 	pr_alert("%s" SCALE_FLAG
-		 "--- %s: verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
-		 verbose, shutdown, holdoff, loops, nreaders, nruns, readdelay);
+		 "--- %s: verbose=%d verbose_batched=%d shutdown=%d holdoff=%d lookup_instances=%ld loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
+		 verbose, verbose_batched, shutdown, holdoff, lookup_instances, loops, nreaders, nruns, readdelay);
 }

 static void
@@ -138,6 +138,7 @@ void srcu_drive_gp(struct work_struct *wp)
 	while (lh) {
 		rhp = lh;
 		lh = lh->next;
+		debug_rcu_head_callback(rhp);
 		local_bh_disable();
 		rhp->func(rhp);
 		local_bh_enable();
@@ -223,7 +223,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
 			snp->grplo = cpu;
 		snp->grphi = cpu;
 	}
-	sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
+	sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
 }
 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
 return true;

@@ -255,29 +255,31 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
 	ssp->srcu_sup->sda_is_static = is_static;
 	if (!is_static)
 		ssp->sda = alloc_percpu(struct srcu_data);
-	if (!ssp->sda) {
-		if (!is_static)
-			kfree(ssp->srcu_sup);
-		return -ENOMEM;
-	}
+	if (!ssp->sda)
+		goto err_free_sup;
 	init_srcu_struct_data(ssp);
 	ssp->srcu_sup->srcu_gp_seq_needed_exp = 0;
 	ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
 	if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
-		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
-			if (!ssp->srcu_sup->sda_is_static) {
-				free_percpu(ssp->sda);
-				ssp->sda = NULL;
-				kfree(ssp->srcu_sup);
-				return -ENOMEM;
-			}
-		} else {
-			WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
-		}
+		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC))
+			goto err_free_sda;
+		WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
 	}
 	ssp->srcu_sup->srcu_ssp = ssp;
 	smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, 0); /* Init done. */
 	return 0;
+
+err_free_sda:
+	if (!is_static) {
+		free_percpu(ssp->sda);
+		ssp->sda = NULL;
+	}
+err_free_sup:
+	if (!is_static) {
+		kfree(ssp->srcu_sup);
+		ssp->srcu_sup = NULL;
+	}
+	return -ENOMEM;
 }

 #ifdef CONFIG_DEBUG_LOCK_ALLOC

@@ -782,8 +784,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
 	spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-				       rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
+	WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
 	spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
 	WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
 	WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);

@@ -833,7 +834,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
 	int cpu;

 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
-		if (!(mask & (1 << (cpu - snp->grplo))))
+		if (!(mask & (1UL << (cpu - snp->grplo))))
 			continue;
 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
 	}

@@ -1242,10 +1243,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	spin_lock_irqsave_sdp_contention(sdp, &flags);
 	if (rhp)
 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
+	/*
+	 * The snapshot for acceleration must be taken _before_ the read of the
+	 * current gp sequence used for advancing, otherwise advancing may fail
+	 * and acceleration may then fail too.
+	 *
+	 * This could happen if:
+	 *
+	 * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
+	 *    RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
+	 *
+	 * 2) The grace period for RCU_WAIT_TAIL is seen as started but not
+	 *    completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
+	 *
+	 * 3) This value is passed to rcu_segcblist_advance() which can't move
+	 *    any segment forward and fails.
+	 *
+	 * 4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
+	 *    But then the call to rcu_seq_snap() observes the grace period for the
+	 *    RCU_WAIT_TAIL segment as completed and the subsequent one for the
+	 *    RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
+	 *    so it returns a snapshot of the next grace period, which is X + 12.
+	 *
+	 * 5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
+	 *    freshly enqueued callback in RCU_NEXT_TAIL can't move to
+	 *    RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
+	 *    period (gp_num = X + 8). So acceleration fails.
+	 */
+	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-	s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
+	WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
 		sdp->srcu_gp_seq_needed = s;
 		needgp = true;

@@ -1692,6 +1720,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	ssp = sdp->ssp;
 	rcu_cblist_init(&ready_cbs);
 	spin_lock_irq_rcu_node(sdp);
+	WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
 	if (sdp->srcu_cblist_invoking ||

@@ -1708,6 +1737,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	rhp = rcu_cblist_dequeue(&ready_cbs);
 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
 		debug_rcu_head_unqueue(rhp);
+		debug_rcu_head_callback(rhp);
 		local_bh_disable();
 		rhp->func(rhp);
 		local_bh_enable();

@@ -1720,8 +1750,6 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	 */
 	spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-				       rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq));
 	sdp->srcu_cblist_invoking = false;
 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
 	spin_unlock_irq_rcu_node(sdp);
@@ -432,6 +432,7 @@ static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 {
 	int cpu;
+	int dequeue_limit;
 	unsigned long flags;
 	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
 	long n;

@@ -439,7 +440,8 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 	long ncbsnz = 0;
 	int needgpcb = 0;

-	for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
+	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
+	for (cpu = 0; cpu < dequeue_limit; cpu++) {
 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

 		/* Advance and accelerate any new callbacks. */

@@ -538,6 +540,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 	len = rcl.len;
 	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
+		debug_rcu_head_callback(rhp);
 		local_bh_disable();
 		rhp->func(rhp);
 		local_bh_enable();

@@ -1084,7 +1087,7 @@ void rcu_barrier_tasks(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

-int rcu_tasks_lazy_ms = -1;
+static int rcu_tasks_lazy_ms = -1;
 module_param(rcu_tasks_lazy_ms, int, 0444);

 static int __init rcu_spawn_tasks_kthread(void)

@@ -1979,20 +1982,22 @@ static void test_rcu_tasks_callback(struct rcu_head *rhp)

 static void rcu_tasks_initiate_self_tests(void)
 {
-	pr_info("Running RCU-tasks wait API self tests\n");
 #ifdef CONFIG_TASKS_RCU
+	pr_info("Running RCU Tasks wait API self tests\n");
 	tests[0].runstart = jiffies;
 	synchronize_rcu_tasks();
 	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
 #endif

 #ifdef CONFIG_TASKS_RUDE_RCU
+	pr_info("Running RCU Tasks Rude wait API self tests\n");
 	tests[1].runstart = jiffies;
 	synchronize_rcu_tasks_rude();
 	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
 #endif

 #ifdef CONFIG_TASKS_TRACE_RCU
+	pr_info("Running RCU Tasks Trace wait API self tests\n");
 	tests[2].runstart = jiffies;
 	synchronize_rcu_tasks_trace();
 	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
@@ -97,6 +97,7 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head)

 	trace_rcu_invoke_callback("", head);
 	f = head->func;
+	debug_rcu_head_callback(head);
 	WRITE_ONCE(head->func, (rcu_callback_t)0L);
 	f(head);
 	rcu_lock_release(&rcu_callback_map);
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/export.h>
 #include <linux/completion.h>
+#include <linux/kmemleak.h>
 #include <linux/moduleparam.h>
 #include <linux/panic.h>
 #include <linux/panic_notifier.h>
@@ -1260,7 +1261,7 @@ EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
 /* Unregister a counter, with NULL for not caring which. */
 void rcu_gp_slow_unregister(atomic_t *rgssp)
 {
-	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
+	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);

 	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
 }
@@ -1556,10 +1557,22 @@ static bool rcu_gp_fqs_check_wake(int *gfp)
 */
 static void rcu_gp_fqs(bool first_time)
 {
+	int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
 	struct rcu_node *rnp = rcu_get_root();

 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);

+	WARN_ON_ONCE(nr_fqs > 3);
+	/* Only countdown nr_fqs for stall purposes if jiffies moves. */
+	if (nr_fqs) {
+		if (nr_fqs == 1) {
+			WRITE_ONCE(rcu_state.jiffies_stall,
+				   jiffies + rcu_jiffies_till_stall_check());
+		}
+		WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
+	}
+
 	if (first_time) {
 		/* Collect dyntick-idle snapshots. */
 		force_qs_rnp(dyntick_save_progress_counter);
|
||||||
trace_rcu_invoke_callback(rcu_state.name, rhp);
|
trace_rcu_invoke_callback(rcu_state.name, rhp);
|
||||||
|
|
||||||
f = rhp->func;
|
f = rhp->func;
|
||||||
|
debug_rcu_head_callback(rhp);
|
||||||
WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
|
WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
|
||||||
f(rhp);
|
f(rhp);
|
||||||
|
|
||||||
|
@@ -2713,7 +2727,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 */
 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
 {
-	return __call_rcu_common(head, func, false);
+	__call_rcu_common(head, func, false);
 }
 EXPORT_SYMBOL_GPL(call_rcu_hurry);
 #endif
@@ -2764,7 +2778,7 @@ EXPORT_SYMBOL_GPL(call_rcu_hurry);
 */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
-	return __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
+	__call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
 }
 EXPORT_SYMBOL_GPL(call_rcu);

@@ -3388,6 +3402,14 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 		success = true;
 	}

+	/*
+	 * The kvfree_rcu() caller considers the pointer freed at this point
+	 * and likely removes any references to it. Since the actual slab
+	 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
+	 * this object (no scanning or false positives reporting).
+	 */
+	kmemleak_ignore(ptr);
+
 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
 		schedule_delayed_monitor_work(krcp);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(rcu_barrier);
|
EXPORT_SYMBOL_GPL(rcu_barrier);
|
||||||
|
|
||||||
|
static unsigned long rcu_barrier_last_throttle;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
|
||||||
|
*
|
||||||
|
* This can be thought of as guard rails around rcu_barrier() that
|
||||||
|
* permits unrestricted userspace use, at least assuming the hardware's
|
||||||
|
* try_cmpxchg() is robust. There will be at most one call per second to
|
||||||
|
* rcu_barrier() system-wide from use of this function, which means that
|
||||||
|
* callers might needlessly wait a second or three.
|
||||||
|
*
|
||||||
|
* This is intended for use by test suites to avoid OOM by flushing RCU
|
||||||
|
* callbacks from the previous test before starting the next. See the
|
||||||
|
* rcutree.do_rcu_barrier module parameter for more information.
|
||||||
|
*
|
||||||
|
* Why not simply make rcu_barrier() more scalable? That might be
|
||||||
|
* the eventual endpoint, but let's keep it simple for the time being.
|
||||||
|
* Note that the module parameter infrastructure serializes calls to a
|
||||||
|
* given .set() function, but should concurrent .set() invocation ever be
|
||||||
|
* possible, we are ready!
|
||||||
|
*/
|
||||||
|
static void rcu_barrier_throttled(void)
|
||||||
|
{
|
||||||
|
unsigned long j = jiffies;
|
||||||
|
unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
|
||||||
|
unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
|
||||||
|
|
||||||
|
while (time_in_range(j, old, old + HZ / 16) ||
|
||||||
|
!try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
|
||||||
|
schedule_timeout_idle(HZ / 16);
|
||||||
|
if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
|
||||||
|
smp_mb(); /* caller's subsequent code after above check. */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
j = jiffies;
|
||||||
|
old = READ_ONCE(rcu_barrier_last_throttle);
|
||||||
|
}
|
||||||
|
rcu_barrier();
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
|
||||||
|
* request arrives. We insist on a true value to allow for possible
|
||||||
|
* future expansion.
|
||||||
|
*/
|
||||||
|
static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
|
||||||
|
{
|
||||||
|
bool b;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
|
||||||
|
return -EAGAIN;
|
||||||
|
ret = kstrtobool(val, &b);
|
||||||
|
if (!ret && b) {
|
||||||
|
atomic_inc((atomic_t *)kp->arg);
|
||||||
|
rcu_barrier_throttled();
|
||||||
|
atomic_dec((atomic_t *)kp->arg);
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Output the number of outstanding rcutree.do_rcu_barrier requests.
|
||||||
|
*/
|
||||||
|
static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
|
||||||
|
{
|
||||||
|
return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct kernel_param_ops do_rcu_barrier_ops = {
|
||||||
|
.set = param_set_do_rcu_barrier,
|
||||||
|
.get = param_get_do_rcu_barrier,
|
||||||
|
};
|
||||||
|
static atomic_t do_rcu_barrier;
|
||||||
|
module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Compute the mask of online CPUs for the specified rcu_node structure.
|
* Compute the mask of online CPUs for the specified rcu_node structure.
|
||||||
* This will not be stable unless the rcu_node structure's ->lock is
|
* This will not be stable unless the rcu_node structure's ->lock is
|
||||||
|
@@ -4130,7 +4228,7 @@ bool rcu_lockdep_current_cpu_online(void)
 	rdp = this_cpu_ptr(&rcu_data);
 	/*
 	 * Strictly, we care here about the case where the current CPU is
-	 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
+	 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
 	 * not being up to date. So arch_spin_is_locked() might have a
 	 * false positive if it's held by some *other* CPU, but that's
 	 * OK because that just means a false *negative* on the warning.
@@ -4151,25 +4249,6 @@ static bool rcu_init_invoked(void)
 	return !!rcu_state.n_online_cpus;
 }

-/*
- * Near the end of the offline process.  Trace the fact that this CPU
- * is going offline.
- */
-int rcutree_dying_cpu(unsigned int cpu)
-{
-	bool blkd;
-	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-	struct rcu_node *rnp = rdp->mynode;
-
-	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return 0;
-
-	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
-	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
-			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
-	return 0;
-}
-
 /*
  * All CPUs for the specified rcu_node structure have gone offline,
  * and all tasks that were preempted within an RCU read-side critical
@@ -4215,23 +4294,6 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 	}
 }

-/*
- * The CPU has been completely removed, and some other CPU is reporting
- * this fact from process context.  Do the remainder of the cleanup.
- * There can only be one CPU hotplug operation at a time, so no need for
- * explicit locking.
- */
-int rcutree_dead_cpu(unsigned int cpu)
-{
-	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-		return 0;
-
-	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
-	// Stop-machine done, so allow nohz_full to disable tick.
-	tick_dep_clear(TICK_DEP_BIT_RCU);
-	return 0;
-}
-
 /*
  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
  * first CPU in a given leaf rcu_node structure coming online.  The caller
@@ -4384,29 +4446,6 @@ int rcutree_online_cpu(unsigned int cpu)
 	return 0;
 }

-/*
- * Near the beginning of the process.  The CPU is still very much alive
- * with pretty much all services enabled.
- */
-int rcutree_offline_cpu(unsigned int cpu)
-{
-	unsigned long flags;
-	struct rcu_data *rdp;
-	struct rcu_node *rnp;
-
-	rdp = per_cpu_ptr(&rcu_data, cpu);
-	rnp = rdp->mynode;
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	rnp->ffmask &= ~rdp->grpmask;
-	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
-	rcutree_affinity_setting(cpu, cpu);
-
-	// nohz_full CPUs need the tick for stop-machine to work quickly
-	tick_dep_set(TICK_DEP_BIT_RCU);
-	return 0;
-}
-
 /*
  * Mark the specified CPU as being online so that subsequent grace periods
  * (both expedited and normal) will wait on it.  Note that this means that
@@ -4418,8 +4457,10 @@ int rcutree_offline_cpu(unsigned int cpu)
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 * This incoming CPU must not have enabled interrupts yet.
+ *
+ * This mirrors the effects of rcutree_report_cpu_dead().
 */
-void rcu_cpu_starting(unsigned int cpu)
+void rcutree_report_cpu_starting(unsigned int cpu)
 {
 	unsigned long mask;
 	struct rcu_data *rdp;
@ -4473,14 +4514,21 @@ void rcu_cpu_starting(unsigned int cpu)
|
||||||
* Note that this function is special in that it is invoked directly
|
* Note that this function is special in that it is invoked directly
|
||||||
* from the outgoing CPU rather than from the cpuhp_step mechanism.
|
* from the outgoing CPU rather than from the cpuhp_step mechanism.
|
||||||
* This is because this function must be invoked at a precise location.
|
* This is because this function must be invoked at a precise location.
|
||||||
|
*
|
||||||
|
* This mirrors the effect of rcutree_report_cpu_starting().
|
||||||
*/
|
*/
|
||||||
void rcu_report_dead(unsigned int cpu)
|
void rcutree_report_cpu_dead(void)
|
||||||
{
|
{
|
||||||
unsigned long flags, seq_flags;
|
unsigned long flags;
|
||||||
unsigned long mask;
|
unsigned long mask;
|
||||||
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
|
||||||
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
|
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* IRQS must be disabled from now on and until the CPU dies, or an interrupt
|
||||||
|
* may introduce a new READ-side while it is actually off the QS masks.
|
||||||
|
*/
|
||||||
|
lockdep_assert_irqs_disabled();
|
||||||
// Do any dangling deferred wakeups.
|
// Do any dangling deferred wakeups.
|
||||||
do_nocb_deferred_wakeup(rdp);
|
do_nocb_deferred_wakeup(rdp);
|
||||||
|
|
||||||
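The new lockdep assertion above makes the calling contract explicit: the dying CPU reports its own death, with interrupts already disabled, so no late interrupt can open a new read-side critical section after the CPU has been cleared from the quiescent-state masks. A minimal, purely hypothetical arch-side stub honoring that contract could look like the sketch below (example_arch_cpu_die() and its placement are illustrative, not taken from this merge):

    void example_arch_cpu_die(void)
    {
            local_irq_disable();            /* satisfies lockdep_assert_irqs_disabled() */
            rcutree_report_cpu_dead();      /* no cpu argument: always reports the current CPU */
            /* ...architecture-specific code that parks or powers off this CPU... */
    }
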
@@ -4488,7 +4536,6 @@ void rcu_report_dead(unsigned int cpu)
 
 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 	mask = rdp->grpmask;
-	local_irq_save(seq_flags);
 	arch_spin_lock(&rcu_state.ofl_lock);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
@@ -4502,8 +4549,6 @@ void rcu_report_dead(unsigned int cpu)
 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	arch_spin_unlock(&rcu_state.ofl_lock);
-	local_irq_restore(seq_flags);
-
 	rdp->cpu_started = false;
 }
 
@@ -4558,7 +4603,60 @@ void rcutree_migrate_callbacks(int cpu)
 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
 		  rcu_segcblist_first_cb(&rdp->cblist));
 }
-#endif
+
+/*
+ * The CPU has been completely removed, and some other CPU is reporting
+ * this fact from process context. Do the remainder of the cleanup.
+ * There can only be one CPU hotplug operation at a time, so no need for
+ * explicit locking.
+ */
+int rcutree_dead_cpu(unsigned int cpu)
+{
+	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
+	// Stop-machine done, so allow nohz_full to disable tick.
+	tick_dep_clear(TICK_DEP_BIT_RCU);
+	return 0;
+}
+
+/*
+ * Near the end of the offline process. Trace the fact that this CPU
+ * is going offline.
+ */
+int rcutree_dying_cpu(unsigned int cpu)
+{
+	bool blkd;
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+	struct rcu_node *rnp = rdp->mynode;
+
+	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
+	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
+			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
+	return 0;
+}
+
+/*
+ * Near the beginning of the process. The CPU is still very much alive
+ * with pretty much all services enabled.
+ */
+int rcutree_offline_cpu(unsigned int cpu)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+
+	rdp = per_cpu_ptr(&rcu_data, cpu);
+	rnp = rdp->mynode;
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	rnp->ffmask &= ~rdp->grpmask;
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+
+	rcutree_affinity_setting(cpu, cpu);
+
+	// nohz_full CPUs need the tick for stop-machine to work quickly
+	tick_dep_set(TICK_DEP_BIT_RCU);
+	return 0;
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
  * On non-huge systems, use expedited RCU grace periods to make suspend
@@ -4990,7 +5088,7 @@ void __init rcu_init(void)
 	pm_notifier(rcu_pm_notify, 0);
 	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
 	rcutree_prepare_cpu(cpu);
-	rcu_cpu_starting(cpu);
+	rcutree_report_cpu_starting(cpu);
 	rcutree_online_cpu(cpu);
 
 	/* Create workqueue for Tree SRCU and for expedited GPs. */
@@ -386,6 +386,10 @@ struct rcu_state {
 						/* in jiffies. */
 	unsigned long jiffies_stall;		/* Time at which to check */
 						/* for CPU stalls. */
+	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
+						 * which read jiffies and set
+						 * jiffies_stall. Stall
+						 * warnings disabled if !0. */
 	unsigned long jiffies_resched;		/* Time at which to resched */
 						/* a reluctant CPU. */
 	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
@@ -621,10 +621,14 @@ static void synchronize_rcu_expedited_wait(void)
 	}
 
 	for (;;) {
+		unsigned long j;
+
 		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
 			return;
 		if (rcu_stall_is_suppressed())
 			continue;
+		j = jiffies;
+		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));
 		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
 		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
 		       rcu_state.name);
@@ -647,7 +651,7 @@ static void synchronize_rcu_expedited_wait(void)
 			}
 		}
 		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
-			jiffies - jiffies_start, rcu_state.expedited_sequence,
+			j - jiffies_start, rcu_state.expedited_sequence,
 			data_race(rnp_root->expmask),
 			".T"[!!data_race(rnp_root->exp_tasks)]);
 		if (ndetected) {
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kvm_para.h>
+#include <linux/rcu_notifier.h>
 
 //////////////////////////////////////////////////////////////////////////////
 //
@@ -149,12 +150,17 @@ static void panic_on_rcu_stall(void)
 /**
  * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
  *
+ * To perform the reset request from the caller, disable stall detection until
+ * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
+ * loaded. It should be safe to do from the fqs loop as enough timer
+ * interrupts and context switches should have passed.
+ *
  * The caller must disable hard irqs.
  */
 void rcu_cpu_stall_reset(void)
 {
-	WRITE_ONCE(rcu_state.jiffies_stall,
-		   jiffies + rcu_jiffies_till_stall_check());
+	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
+	WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
 }
 
 //////////////////////////////////////////////////////////////////////////////
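The FQS-loop side of this handshake is not part of the hunks shown here, so the following is only a sketch, assuming it runs where rcu_state is visible: decrement nr_fqs_jiffies_stall once per force-quiescent-state pass and, when the countdown expires, re-arm jiffies_stall from a jiffies value that is fresh again.

    /* Illustrative only; the real consumer lives in the FQS loop, outside this diff. */
    static void example_fqs_rearm_stall_timeout(void)
    {
            int nr = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);

            if (nr <= 0)
                    return;         /* No reset pending, normal stall checking applies. */
            WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, nr - 1);
            if (nr == 1)            /* Countdown just expired: reload from fresh jiffies. */
                    WRITE_ONCE(rcu_state.jiffies_stall,
                               jiffies + rcu_jiffies_till_stall_check());
    }

Until that countdown hits zero, check_cpu_stall() below bails out early, which is what keeps stale jiffies from triggering false-positive warnings after a long jiffies-update gap.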
@@ -170,6 +176,7 @@ static void record_gp_stall_check_time(void)
 	WRITE_ONCE(rcu_state.gp_start, j);
 	j1 = rcu_jiffies_till_stall_check();
 	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
+	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
 	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
 	rcu_state.jiffies_resched = j + j1 / 2;
 	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
@@ -534,16 +541,16 @@ static void rcu_check_gp_kthread_starvation(void)
 			data_race(READ_ONCE(rcu_state.gp_state)),
 			gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
 		if (gpk) {
+			struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
 			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
 			pr_err("RCU grace-period kthread stack dump:\n");
 			sched_show_task(gpk);
-			if (cpu >= 0) {
-				if (cpu_is_offline(cpu)) {
-					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
-				} else {
-					pr_err("Stack dump where RCU GP kthread last ran:\n");
-					dump_cpu_task(cpu);
-				}
+			if (cpu_is_offline(cpu)) {
+				pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
+			} else if (!(data_race(READ_ONCE(rdp->mynode->qsmask)) & rdp->grpmask)) {
+				pr_err("Stack dump where RCU GP kthread last ran:\n");
+				dump_cpu_task(cpu);
 			}
 			wake_up_process(gpk);
 		}
@@ -711,7 +718,7 @@ static void print_cpu_stall(unsigned long gps)
 
 static void check_cpu_stall(struct rcu_data *rdp)
 {
-	bool didstall = false;
+	bool self_detected;
 	unsigned long gs1;
 	unsigned long gs2;
 	unsigned long gps;
@@ -725,6 +732,16 @@ static void check_cpu_stall(struct rcu_data *rdp)
 	    !rcu_gp_in_progress())
 		return;
 	rcu_stall_kick_kthreads();
+
+	/*
+	 * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
+	 * loop has to set jiffies to ensure a non-stale jiffies value. This
+	 * is required to have good jiffies value after coming out of long
+	 * breaks of jiffies updates. Not doing so can cause false positives.
+	 */
+	if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
+		return;
+
 	j = jiffies;
 
 	/*
@@ -758,10 +775,10 @@ static void check_cpu_stall(struct rcu_data *rdp)
 		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	jn = jiffies + ULONG_MAX / 2;
+	self_detected = READ_ONCE(rnp->qsmask) & rdp->grpmask;
 	if (rcu_gp_in_progress() &&
-	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+	    (self_detected || ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) &&
 	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
 		/*
 		 * If a virtual machine is stopped by the host it can look to
 		 * the watchdog like an RCU stall. Check to see if the host
@@ -770,39 +787,28 @@ static void check_cpu_stall(struct rcu_data *rdp)
 		if (kvm_check_and_clear_guest_paused())
 			return;
 
-		/* We haven't checked in, so go dump stack. */
-		print_cpu_stall(gps);
+		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_NORM, (void *)j - gps);
+		if (self_detected) {
+			/* We haven't checked in, so go dump stack. */
+			print_cpu_stall(gps);
+		} else {
+			/* They had a few time units to dump stack, so complain. */
+			print_other_cpu_stall(gs2, gps);
+		}
+
 		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
 			rcu_ftrace_dump(DUMP_ALL);
-		didstall = true;
 
-	} else if (rcu_gp_in_progress() &&
-		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
-		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
-		/*
-		 * If a virtual machine is stopped by the host it can look to
-		 * the watchdog like an RCU stall. Check to see if the host
-		 * stopped the vm.
-		 */
-		if (kvm_check_and_clear_guest_paused())
-			return;
-
-		/* They had a few time units to dump stack, so complain. */
-		print_other_cpu_stall(gs2, gps);
-		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
-			rcu_ftrace_dump(DUMP_ALL);
-		didstall = true;
-	}
-	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
-		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
-		WRITE_ONCE(rcu_state.jiffies_stall, jn);
+		if (READ_ONCE(rcu_state.jiffies_stall) == jn) {
+			jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+			WRITE_ONCE(rcu_state.jiffies_stall, jn);
+		}
 	}
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
-// RCU forward-progress mechanisms, including of callback invocation.
+// RCU forward-progress mechanisms, including for callback invocation.
 
 
 /*
@@ -1054,3 +1060,58 @@ static int __init rcu_sysrq_init(void)
 	return 0;
 }
 early_initcall(rcu_sysrq_init);
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// RCU CPU stall-warning notifiers
+
+static ATOMIC_NOTIFIER_HEAD(rcu_cpu_stall_notifier_list);
+
+/**
+ * rcu_stall_chain_notifier_register - Add an RCU CPU stall notifier
+ * @n: Entry to add.
+ *
+ * Adds an RCU CPU stall notifier to an atomic notifier chain.
+ * The @action passed to a notifier will be @RCU_STALL_NOTIFY_NORM or
+ * friends. The @data will be the duration of the stalled grace period,
+ * in jiffies, coerced to a void* pointer.
+ *
+ * Returns 0 on success, %-EEXIST on error.
+ */
+int rcu_stall_chain_notifier_register(struct notifier_block *n)
+{
+	return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
+}
+EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_register);
+
+/**
+ * rcu_stall_chain_notifier_unregister - Remove an RCU CPU stall notifier
+ * @n: Entry to add.
+ *
+ * Removes an RCU CPU stall notifier from an atomic notifier chain.
+ *
+ * Returns zero on success, %-ENOENT on failure.
+ */
+int rcu_stall_chain_notifier_unregister(struct notifier_block *n)
+{
+	return atomic_notifier_chain_unregister(&rcu_cpu_stall_notifier_list, n);
+}
+EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_unregister);
+
+/*
+ * rcu_stall_notifier_call_chain - Call functions in an RCU CPU stall notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in the RCU CPU stall notifier chain in turn, which
+ * is an atomic call chain. See atomic_notifier_call_chain() for more
+ * information.
+ *
+ * This is for use within RCU, hence the omission of the extra asterisk
+ * to indicate a non-kerneldoc format header comment.
+ */
+int rcu_stall_notifier_call_chain(unsigned long val, void *v)
+{
+	return atomic_notifier_call_chain(&rcu_cpu_stall_notifier_list, val, v);
+}
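The kerneldoc above spells out the registration contract, so a consumer is easy to sketch. The module below is a hypothetical example (my_rcu_stall_cb and the other names are invented): it logs each stall notification, where @action is RCU_STALL_NOTIFY_NORM or RCU_STALL_NOTIFY_EXP and @data carries the stall duration in jiffies.

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/rcu_notifier.h>

    /* Log every RCU stall notification delivered through the new chain. */
    static int my_rcu_stall_cb(struct notifier_block *nb, unsigned long action, void *data)
    {
            unsigned long duration = (unsigned long)data;   /* jiffies, per the kerneldoc */

            pr_info("RCU stall notification: action=%lu duration=%lu jiffies\n",
                    action, duration);
            return NOTIFY_OK;
    }

    static struct notifier_block my_rcu_stall_nb = {
            .notifier_call = my_rcu_stall_cb,
    };

    static int __init my_stall_watch_init(void)
    {
            return rcu_stall_chain_notifier_register(&my_rcu_stall_nb);
    }

    static void __exit my_stall_watch_exit(void)
    {
            rcu_stall_chain_notifier_unregister(&my_rcu_stall_nb);
    }

    module_init(my_stall_watch_init);
    module_exit(my_stall_watch_exit);
    MODULE_LICENSE("GPL");

Because the chain is atomic, the callback must not sleep; anything heavier than logging should be punted to a workqueue.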
@@ -528,26 +528,6 @@ bool slab_is_available(void)
 }
 
 #ifdef CONFIG_PRINTK
-/**
- * kmem_valid_obj - does the pointer reference a valid slab object?
- * @object: pointer to query.
- *
- * Return: %true if the pointer is to a not-yet-freed object from
- * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
- * is to an already-freed object, and %false otherwise.
- */
-bool kmem_valid_obj(void *object)
-{
-	struct folio *folio;
-
-	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
-	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
-		return false;
-	folio = virt_to_folio(object);
-	return folio_test_slab(folio);
-}
-EXPORT_SYMBOL_GPL(kmem_valid_obj);
-
 static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
 	if (__kfence_obj_info(kpp, object, slab))
@@ -566,11 +546,11 @@ static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *
  * and, if available, the slab name, return address, and stack trace from
  * the allocation and last free path of that object.
  *
- * This function will splat if passed a pointer to a non-slab object.
- * If you are not sure what type of object you have, you should instead
- * use mem_dump_obj().
+ * Return: %true if the pointer is to a not-yet-freed object from
+ * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
+ * is to an already-freed object, and %false otherwise.
  */
-void kmem_dump_obj(void *object)
+bool kmem_dump_obj(void *object)
 {
 	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
 	int i;
@@ -578,13 +558,13 @@ void kmem_dump_obj(void *object)
 	unsigned long ptroffset;
 	struct kmem_obj_info kp = { };
 
-	if (WARN_ON_ONCE(!virt_addr_valid(object)))
-		return;
+	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
+	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
+		return false;
 	slab = virt_to_slab(object);
-	if (WARN_ON_ONCE(!slab)) {
-		pr_cont(" non-slab memory.\n");
-		return;
-	}
+	if (!slab)
+		return false;
 	kmem_obj_info(&kp, object, slab);
 	if (kp.kp_slab_cache)
 		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
@@ -621,6 +601,7 @@ void kmem_dump_obj(void *object)
 			pr_info(" %pS\n", kp.kp_free_stack[i]);
 	}
 
+	return true;
 }
 EXPORT_SYMBOL_GPL(kmem_dump_obj);
 #endif
@@ -1060,10 +1060,8 @@ void mem_dump_obj(void *object)
 {
 	const char *type;
 
-	if (kmem_valid_obj(object)) {
-		kmem_dump_obj(object);
+	if (kmem_dump_obj(object))
 		return;
-	}
 
 	if (vmalloc_dump_obj(object))
 		return;
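With kmem_dump_obj() now both validating and dumping, callers no longer need the separate kmem_valid_obj() probe. A hedged usage sketch, assuming CONFIG_PRINTK and a debugging context where the pointer's provenance is unknown (the two helper names are invented):

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* General case: let mem_dump_obj() pick slab, vmalloc, or a generic report. */
    static void debug_describe_pointer(void *p)
    {
            mem_dump_obj(p);
    }

    /* Slab-only case: the new bool return says whether anything was printed. */
    static bool debug_describe_slab_object(void *p)
    {
            return kmem_dump_obj(p);
    }
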
@@ -6427,15 +6427,6 @@ sub process {
 			}
 		}
 
-# check for soon-to-be-deprecated single-argument k[v]free_rcu() API
-		if ($line =~ /\bk[v]?free_rcu\s*\([^(]+\)/) {
-			if ($line =~ /\bk[v]?free_rcu\s*\([^,]+\)/) {
-				ERROR("DEPRECATED_API",
-				      "Single-argument k[v]free_rcu() API is deprecated, please pass rcu_head object or call k[v]free_rcu_mightsleep()." . $herecurr);
-			}
-		}
-
-
 # check for unnecessary "Out of Memory" messages
 		if ($line =~ /^\+.*\b$logFunctions\s*\(/ &&
 		    $prevline =~ /^[ \+]\s*if\s*\(\s*(\!\s*|NULL\s*==\s*)?($Lval)(\s*==\s*NULL\s*)?\s*\)/ &&
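The dropped checkpatch rule policed the old single-argument k[v]free_rcu() form, which has been superseded by the explicit variants named in the removed message. A brief reminder of the two surviving call forms (struct foo and the helpers around it are illustrative only):

    #include <linux/slab.h>
    #include <linux/rcupdate.h>

    struct foo {
            int data;
            struct rcu_head rh;
    };

    static void release_foo(struct foo *fp)
    {
            kfree_rcu(fp, rh);              /* double-argument form, usable from atomic context */
    }

    static void release_foo_sleepable(struct foo *fp)
    {
            kfree_rcu_mightsleep(fp);       /* headless form; may sleep, so sleepable context only */
    }

Either form avoids the pattern the removed ERROR("DEPRECATED_API", ...) check was flagging, which is why checkpatch no longer needs it.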