linux-imx/kernel/softirq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/softirq.c
*
* Copyright (C) 1992 Linus Torvalds
*
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <asm/softirq_stack.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(softirq_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(softirq_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(tasklet_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(tasklet_exit);
/*
   - No shared variables; all data is CPU-local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution, so we get a weak form of CPU binding. It is still not
     clear whether this results in better locality.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, so it is
     logically serialized per device, but that serialization is
     invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
#ifdef CONFIG_RT_SOFTIRQ_AWARE_SCHED
/*
* active_softirqs -- a per-CPU mask of the softirqs currently being handled,
* with the expectation that approximate answers are acceptable and therefore
* no synchronization is used.
*/
DEFINE_PER_CPU(u32, active_softirqs);
static inline void set_active_softirqs(u32 pending)
{
__this_cpu_write(active_softirqs, pending);
}
#else /* CONFIG_RT_SOFTIRQ_AWARE_SCHED */
static inline void set_active_softirqs(u32 pending) { }
#endif /* CONFIG_RT_SOFTIRQ_AWARE_SCHED */
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
* We cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst-case 1/HZ latency
* to the pending events, so we let the scheduler balance
* the softirq load for us.
*/
static void wakeup_softirqd(void)
{
/* Interrupts are disabled: no need to stop preemption */
struct task_struct *tsk = __this_cpu_read(ksoftirqd);
if (tsk)
wake_up_process(tsk);
}
#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif
/*
* SOFTIRQ_OFFSET usage:
*
* On !RT kernels 'count' is the preempt counter, on RT kernels this applies
* to a per CPU counter and to task::softirq_disable_cnt.
*
* - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
* processing.
*
* - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
* on local_bh_disable or local_bh_enable.
*
* This lets us distinguish between whether we are currently processing
* softirq and whether we just have bh disabled.
*/
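/*
 * Illustrative sketch (not part of this file): how the two cases are told
 * apart. local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET, which does not set
 * the SOFTIRQ_OFFSET bit, so softirq_count() becomes non-zero while
 * in_serving_softirq() stays false. The example function below is
 * hypothetical and compiled out.
 */
#if 0
static void example_softirq_counting(void)
{
	local_bh_disable();		/* count += SOFTIRQ_DISABLE_OFFSET */
	WARN_ON(!softirq_count());	/* BH is disabled ... */
	WARN_ON(in_serving_softirq());	/* ... but no softirq is being served */
	local_bh_enable();
}
#endif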
#ifdef CONFIG_PREEMPT_RT
/*
* RT accounts for BH disabled sections in task::softirq_disable_cnt and
* also in the per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
* softirq disabled section to be preempted.
*
* The per task counter is used for softirq_count(), in_softirq() and
* in_serving_softirq() because these counts are only valid when the task
* holding softirq_ctrl::lock is running.
*
* The per CPU counter prevents pointless wakeups of ksoftirqd in case the
* task which is in a softirq disabled section is preempted or blocks.
*/
struct softirq_ctrl {
local_lock_t lock;
int cnt;
};
static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
.lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
};
/**
* local_bh_blocked() - Check from the idle task whether BH processing is blocked
*
* Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
*
* This is invoked from the idle task to guard against false positive
* softirq pending warnings, which would happen when the task which holds
* softirq_ctrl::lock was the only running task on the CPU and blocks on
* some other lock.
*/
bool local_bh_blocked(void)
{
return __this_cpu_read(softirq_ctrl.cnt) != 0;
}
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
int newcnt;
WARN_ON_ONCE(in_hardirq());
/* First entry of a task into a BH disabled section? */
if (!current->softirq_disable_cnt) {
if (preemptible()) {
local_lock(&softirq_ctrl.lock);
/* Required to meet the RCU bottomhalf requirements. */
rcu_read_lock();
} else {
DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
}
}
/*
* Track the per CPU softirq disabled state. On RT this is per CPU
* state to allow preemption of bottom half disabled sections.
*/
newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
/*
* Reflect the result in the task state to prevent recursion on the
* local lock and to make softirq_count() & al work.
*/
current->softirq_disable_cnt = newcnt;
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
raw_local_irq_save(flags);
lockdep_softirqs_off(ip);
raw_local_irq_restore(flags);
}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
static void __local_bh_enable(unsigned int cnt, bool unlock)
{
unsigned long flags;
int newcnt;
DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
this_cpu_read(softirq_ctrl.cnt));
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
raw_local_irq_save(flags);
lockdep_softirqs_on(_RET_IP_);
raw_local_irq_restore(flags);
}
newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
current->softirq_disable_cnt = newcnt;
if (!newcnt && unlock) {
rcu_read_unlock();
local_unlock(&softirq_ctrl.lock);
}
}
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
bool preempt_on = preemptible();
unsigned long flags;
u32 pending;
int curcnt;
WARN_ON_ONCE(in_hardirq());
lockdep_assert_irqs_enabled();
local_irq_save(flags);
curcnt = __this_cpu_read(softirq_ctrl.cnt);
/*
* If this is not reenabling soft interrupts, no point in trying to
* run pending ones.
*/
if (curcnt != cnt)
goto out;
pending = local_softirq_pending();
if (!pending)
goto out;
/*
* If this was called from non preemptible context, wake up the
* softirq daemon.
*/
if (!preempt_on) {
wakeup_softirqd();
goto out;
}
/*
* Adjust softirq count to SOFTIRQ_OFFSET which makes
* in_serving_softirq() become true.
*/
cnt = SOFTIRQ_OFFSET;
__local_bh_enable(cnt, false);
__do_softirq();
out:
__local_bh_enable(cnt, preempt_on);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
/*
* Invoked from run_ksoftirqd() outside of the interrupt disabled section
* to acquire the per CPU local lock for reentrancy protection.
*/
static inline void ksoftirqd_run_begin(void)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
local_irq_disable();
}
/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
__local_bh_enable(SOFTIRQ_OFFSET, true);
WARN_ON_ONCE(in_interrupt());
local_irq_enable();
}
static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }
static inline bool should_wake_ksoftirqd(void)
{
return !this_cpu_read(softirq_ctrl.cnt);
}
static inline void invoke_softirq(void)
{
if (should_wake_ksoftirqd())
wakeup_softirqd();
}
/*
* flush_smp_call_function_queue() can raise a soft interrupt in a function
* call. On RT kernels this is undesired and the only known functionality
* in the block layer which does this is disabled on RT. If soft interrupts
* get raised which haven't been raised before the flush, warn so it can be
* investigated.
*/
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
invoke_softirq();
}
#else /* CONFIG_PREEMPT_RT */
/*
* This one is for softirq.c-internal use, where hardirqs are disabled
* legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
WARN_ON_ONCE(in_hardirq());
raw_local_irq_save(flags);
/*
* The preempt tracer hooks into preempt_count_add and will break
* lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
* is set and before current->softirq_enabled is cleared.
* We must manually increment preempt_count here and manually
* call the trace_preempt_off later.
*/
__preempt_count_add(cnt);
/*
* Were softirqs turned off above?
*/
if (softirq_count() == (cnt & SOFTIRQ_MASK))
lockdep_softirqs_off(ip);
raw_local_irq_restore(flags);
if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
current->preempt_disable_ip = get_lock_parent_ip();
#endif
trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
lockdep_assert_irqs_disabled();
if (preempt_count() == cnt)
trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
if (softirq_count() == (cnt & SOFTIRQ_MASK))
lockdep_softirqs_on(_RET_IP_);
__preempt_count_sub(cnt);
}
/*
* Special-case - softirqs can safely be enabled by __do_softirq(),
* without processing still-pending softirqs:
*/
void _local_bh_enable(void)
{
WARN_ON_ONCE(in_hardirq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
WARN_ON_ONCE(in_hardirq());
lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_disable();
#endif
/*
* Are softirqs going to be turned on now?
*/
if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
lockdep_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
*/
__preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) {
/*
* Run softirqs if any are pending, and do it on the softirq
* stack, as we may already be deep in a task's call stack.
*/
do_softirq();
}
preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_enable();
#endif
preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
static inline void softirq_handle_begin(void)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}
static inline void softirq_handle_end(void)
{
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
}
static inline void ksoftirqd_run_begin(void)
{
local_irq_disable();
}
static inline void ksoftirqd_run_end(void)
{
local_irq_enable();
}
static inline bool should_wake_ksoftirqd(void)
{
return true;
}
static inline void invoke_softirq(void)
{
if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
* We can safely execute softirq on the current stack if
* it is the irq stack, because it should be near empty
* at this stage.
*/
__do_softirq();
#else
/*
* Otherwise, irq_exit() is called on the task stack, which may
* already be deep. So run softirqs on their own stack to
* prevent overrunning it.
*/
do_softirq_own_stack();
#endif
} else {
wakeup_softirqd();
}
}
asmlinkage __visible void do_softirq(void)
{
__u32 pending;
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
pending = local_softirq_pending();
if (pending)
do_softirq_own_stack();
local_irq_restore(flags);
}
#endif /* !CONFIG_PREEMPT_RT */
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
* but break the loop if need_resched() is set or after 2 ms.
* The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
* certain cases, such as stop_machine(), jiffies may cease to
* increment and so we need the MAX_SOFTIRQ_RESTART limit as
* well to make sure we eventually return from this method.
*
* These limits have been established via experimentation.
* The two things to balance are latency and fairness -
* we want to handle softirqs as soon as possible, but they
* should not be able to lock up the box.
*/
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
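/*
 * Worked example (assuming HZ=1000, which this file does not fix): with
 * MAX_SOFTIRQ_TIME = msecs_to_jiffies(2) = 2 jiffies, handle_softirqs()
 * keeps restarting its loop until ~2ms have elapsed, MAX_SOFTIRQ_RESTART
 * restarts have been used, or need_resched() becomes true, whichever
 * comes first.
 */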
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* When we run softirqs from irq_exit() and thus on the hardirq stack we need
* to keep the lockdep irq context tracking as tight as possible in order to
* not mis-qualify lock contexts and miss possible deadlocks.
*/
static inline bool lockdep_softirq_start(void)
{
bool in_hardirq = false;
if (lockdep_hardirq_context()) {
in_hardirq = true;
lockdep_hardirq_exit();
}
lockdep_softirq_enter();
return in_hardirq;
}
static inline void lockdep_softirq_end(bool in_hardirq)
{
lockdep_softirq_exit();
if (in_hardirq)
lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
#ifdef CONFIG_RT_SOFTIRQ_AWARE_SCHED
static __u32 softirq_deferred_for_rt(__u32 *pending)
{
__u32 deferred = 0;
if (rt_task(current)) {
deferred = *pending & LONG_SOFTIRQ_MASK;
*pending &= ~LONG_SOFTIRQ_MASK;
}
return deferred;
}
#else
#define softirq_deferred_for_rt(x) (0)
#endif
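/*
 * Worked illustration of the deferral above: in handle_softirqs(), when the
 * current task is an RT task, the LONG_SOFTIRQ_MASK bits are stripped from
 * the pending set before the handler loop, written back to the per-CPU
 * pending word via set_softirq_pending(), and ksoftirqd is woken at the end
 * (the "pending | deferred" check), so the long-running vectors execute in
 * ksoftirqd context rather than in the RT task's context.
 */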
static void handle_softirqs(bool ksirqd)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
bool in_hardirq;
__u32 deferred;
__u32 pending;
int softirq_bit;
/*
* Mask out PF_MEMALLOC as the current task context is borrowed for the
* softirq. A handled softirq, such as network RX, might set PF_MEMALLOC
* again if the socket is related to swapping.
*/
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
deferred = softirq_deferred_for_rt(&pending);
softirq_handle_begin();
in_hardirq = lockdep_softirq_start();
account_softirq_enter(current);
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(deferred);
set_active_softirqs(pending);
local_irq_enable();
h = softirq_vec;
while ((softirq_bit = ffs(pending))) {
unsigned int vec_nr;
int prev_count;
h += softirq_bit - 1;
vec_nr = h - softirq_vec;
prev_count = preempt_count();
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
h->action(h);
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
vec_nr, softirq_to_name[vec_nr], h->action,
prev_count, preempt_count());
preempt_count_set(prev_count);
}
h++;
pending >>= softirq_bit;
}
set_active_softirqs(0);
if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
rcu_softirq_qs();
local_irq_disable();
pending = local_softirq_pending();
deferred = softirq_deferred_for_rt(&pending);
if (pending) {
if (time_before(jiffies, end) && !need_resched() &&
--max_restart)
goto restart;
}
if (pending | deferred)
wakeup_softirqd();
account_softirq_exit(current);
lockdep_softirq_end(in_hardirq);
softirq_handle_end();
current_restore_flags(old_flags, PF_MEMALLOC);
}
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
handle_softirqs(false);
}
/**
* irq_enter_rcu - Enter an interrupt context with RCU watching
*/
void irq_enter_rcu(void)
{
__irq_enter_raw();
if (tick_nohz_full_cpu(smp_processor_id()) ||
(is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
tick_irq_enter();
account_hardirq_enter(current);
}
/**
* irq_enter - Enter an interrupt context including RCU update
*/
void irq_enter(void)
{
ct_irq_enter();
irq_enter_rcu();
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
int cpu = smp_processor_id();
/* Make sure that timer wheel updates are propagated */
if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
if (!in_hardirq())
tick_nohz_irq_exit();
}
#endif
}
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
#else
lockdep_assert_irqs_disabled();
#endif
account_hardirq_exit(current);
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
tick_irq_exit();
}
/**
* irq_exit_rcu() - Exit an interrupt context without updating RCU
*
* Also processes softirqs if needed and possible.
*/
void irq_exit_rcu(void)
{
__irq_exit_rcu();
/* must be last! */
lockdep_hardirq_exit();
}
/**
* irq_exit - Exit an interrupt context, update RCU and lockdep
*
* Also processes softirqs if needed and possible.
*/
void irq_exit(void)
{
__irq_exit_rcu();
ct_irq_exit();
/* must be last! */
lockdep_hardirq_exit();
}
/*
* This function must run with irqs disabled!
*/
inline void raise_softirq_irqoff(unsigned int nr)
{
__raise_softirq_irqoff(nr);
/*
* If we're in an interrupt or softirq, we're done
* (this also catches softirq-disabled code). We will
* actually run the softirq once we return from
* the irq or softirq.
*
* Otherwise we wake up ksoftirqd to make sure we
* schedule the softirq soon.
*/
if (!in_interrupt() && should_wake_ksoftirqd())
wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
unsigned long flags;
local_irq_save(flags);
raise_softirq_irqoff(nr);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(raise_softirq);
void __raise_softirq_irqoff(unsigned int nr)
{
lockdep_assert_irqs_disabled();
trace_softirq_raise(nr);
or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
}
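/*
 * Usage sketch (illustrative only; EXAMPLE_SOFTIRQ and the functions below
 * are hypothetical, so the block is compiled out): a subsystem registers
 * its action once at init time and raises the softirq from hot paths,
 * typically hardirq context. The NET_TX/NET_RX softirqs follow this
 * pattern.
 */
#if 0
static void example_action(struct softirq_action *h)
{
	/* Runs in BH context with interrupts enabled, on the raising CPU. */
}

static int __init example_init(void)
{
	open_softirq(EXAMPLE_SOFTIRQ, example_action);
	return 0;
}

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	/* Marks the vector pending; it runs from irq_exit() or ksoftirqd. */
	raise_softirq(EXAMPLE_SOFTIRQ);
	return IRQ_HANDLED;
}
#endif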
/*
* Tasklets
*/
struct tasklet_head {
struct tasklet_struct *head;
struct tasklet_struct **tail;
};
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static void __tasklet_schedule_common(struct tasklet_struct *t,
struct tasklet_head __percpu *headp,
unsigned int softirq_nr)
{
struct tasklet_head *head;
unsigned long flags;
local_irq_save(flags);
head = this_cpu_ptr(headp);
t->next = NULL;
*head->tail = t;
head->tail = &(t->next);
raise_softirq_irqoff(softirq_nr);
local_irq_restore(flags);
}
void __tasklet_schedule(struct tasklet_struct *t)
{
__tasklet_schedule_common(t, &tasklet_vec,
TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
__tasklet_schedule_common(t, &tasklet_hi_vec,
HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
static bool tasklet_clear_sched(struct tasklet_struct *t)
{
if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
wake_up_var(&t->state);
return true;
}
WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
t->use_callback ? "callback" : "func",
t->use_callback ? (void *)t->callback : (void *)t->func);
return false;
}
static void tasklet_action_common(struct softirq_action *a,
struct tasklet_head *tl_head,
unsigned int softirq_nr)
{
struct tasklet_struct *list;
local_irq_disable();
list = tl_head->head;
tl_head->head = NULL;
tl_head->tail = &tl_head->head;
local_irq_enable();
while (list) {
struct tasklet_struct *t = list;
list = list->next;
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
if (tasklet_clear_sched(t)) {
if (t->use_callback) {
trace_tasklet_entry(t, t->callback);
t->callback(t);
trace_tasklet_exit(t, t->callback);
} else {
trace_tasklet_entry(t, t->func);
t->func(t->data);
trace_tasklet_exit(t, t->func);
}
}
tasklet_unlock(t);
continue;
}
tasklet_unlock(t);
}
local_irq_disable();
t->next = NULL;
*tl_head->tail = t;
tl_head->tail = &t->next;
__raise_softirq_irqoff(softirq_nr);
local_irq_enable();
}
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
void tasklet_setup(struct tasklet_struct *t,
void (*callback)(struct tasklet_struct *))
{
t->next = NULL;
t->state = 0;
atomic_set(&t->count, 0);
t->callback = callback;
t->use_callback = true;
t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
t->next = NULL;
t->state = 0;
atomic_set(&t->count, 0);
t->func = func;
t->use_callback = false;
t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
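/*
 * Usage sketch (illustrative only; struct example_dev and its fields are
 * hypothetical, so the block is compiled out): new code embeds the tasklet
 * in its own state, initializes it with tasklet_setup(), and recovers the
 * outer structure with container_of() in the callback.
 */
#if 0
struct example_dev {
	struct tasklet_struct bh;
	/* ... driver state ... */
};

static void example_bh(struct tasklet_struct *t)
{
	struct example_dev *dev = container_of(t, struct example_dev, bh);

	/* Deferred work for dev; runs in TASKLET_SOFTIRQ context. */
}

static void example_setup(struct example_dev *dev)
{
	tasklet_setup(&dev->bh, example_bh);
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	tasklet_schedule(&dev->bh);	/* sets SCHED bit, raises TASKLET_SOFTIRQ */
	return IRQ_HANDLED;
}
#endif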
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
* Do not use in new code. Waiting for tasklets from atomic contexts is
* error prone and should be avoided.
*/
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
/*
* Prevent a live lock when the current task has
* preempted soft interrupt processing or is
* preventing ksoftirqd from running. If the tasklet
* runs on a different CPU, this has no effect other
* than doing the BH disable/enable dance for nothing.
*/
local_bh_disable();
local_bh_enable();
} else {
cpu_relax();
}
}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif
void tasklet_kill(struct tasklet_struct *t)
{
if (in_interrupt())
pr_notice("Attempt to kill tasklet from interrupt\n");
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
tasklet_unlock_wait(t);
tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
smp_mb__before_atomic();
clear_bit(TASKLET_STATE_RUN, &t->state);
smp_mb__after_atomic();
wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);
void tasklet_unlock_wait(struct tasklet_struct *t)
{
wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif
void __init softirq_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
per_cpu(tasklet_vec, cpu).tail =
&per_cpu(tasklet_vec, cpu).head;
per_cpu(tasklet_hi_vec, cpu).tail =
&per_cpu(tasklet_hi_vec, cpu).head;
}
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
return local_softirq_pending();
}
static void run_ksoftirqd(unsigned int cpu)
{
ksoftirqd_run_begin();
if (local_softirq_pending()) {
/*
* We can safely run softirqs on the current stack, as we are not
* deep in the task stack here.
*/
handle_softirqs(true);
ksoftirqd_run_end();
cond_resched();
return;
}
ksoftirqd_run_end();
}
#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
/* CPU is dead, so no lock needed. */
local_irq_disable();
/* Find end, append list for that CPU. */
if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
per_cpu(tasklet_vec, cpu).head = NULL;
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
}
raise_softirq_irqoff(TASKLET_SOFTIRQ);
if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
per_cpu(tasklet_hi_vec, cpu).head = NULL;
per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
}
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
return 0;
}
#else
#define takeover_tasklets NULL
#endif /* CONFIG_HOTPLUG_CPU */
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
};
static __init int spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
return 0;
}
early_initcall(spawn_ksoftirqd);
/*
* [ These __weak aliases are kept in a separate compilation unit, so that
* GCC does not inline them incorrectly. ]
*/
int __init __weak early_irq_init(void)
{
return 0;
}
int __init __weak arch_probe_nr_irqs(void)
{
return NR_IRQS_LEGACY;
}
int __init __weak arch_early_irq_init(void)
{
return 0;
}
unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
return from;
}