linux-imx/kernel/workqueue.c
Greg Kroah-Hartman 97d818d826 Merge tag 'android15-6.6.56_r00' into android15-6.6
This merges up to the 6.6.56 LTS release into android15-6.6.  Included
in here are the following commits:

*   3228553f91 Merge 6.6.56 into android15-6.6-lts
|\
| * d4576c5670 Linux 6.6.56
| * 1e0f696469 Revert "perf callchain: Fix stitch LBR memory leaks"
* | ae4370b575 Revert "perf,x86: avoid missing caller address in stack traces captured in uprobe"
* | ac723e338f Revert "i2c: create debugfs entry per adapter"
* | 988aef386e Revert "i2c: core: Lock address during client device instantiation"
* | ba4a8a450d Merge 6.6.55 into android15-6.6-lts
|\|
| * 9b15f68c41 Linux 6.6.55
| * e334ae4a0c perf python: Allow checking for the existence of warning options in clang
| * 3faea7810e Revert "ubifs: ubifs_symlink: Fix memleak of inode->i_link in error path"
| * e0aba0c6d5 null_blk: Fix return value of nullb_device_power_store()
| * e0b065ec63 drm/amd/display: enable_hpo_dp_link_output: Check link_res->hpo_dp_link_enc before using it
| * 776ebdeee6 perf report: Fix segfault when 'sym' sort key is not used
| * eada63e6e3 drm/amd/display: Revert Avoid overflow assignment
| * de4841fca8 crypto: octeontx* - Select CRYPTO_AUTHENC
| * 25613e6d98 vhost/scsi: null-ptr-dereference in vhost_scsi_get_req()
| * cdf4bbbdb9 rxrpc: Fix a race between socket set up and I/O thread creation
| * b538fefeb1 net: stmmac: move the EST lock to struct stmmac_priv
| * aaadb755f2 null_blk: fix null-ptr-dereference while configuring 'power' and 'submit_queues'
| * b2b02202f8 null_blk: Remove usage of the deprecated ida_simple_xx() API
| * 2deb10a996 platform/x86: think-lmi: Fix password opcode ordering for workstations
| * e115c1b5de efi/unaccepted: touch soft lockup during memory accept
| * 50f4b57e9a drm/bridge: adv7511: fix crash on irq during probe
| * fd4d5cd7a2 iommufd: Fix protection fault in iommufd_test_syz_conv_iova
| * 69a1e2d938 net: dsa: fix netdev_priv() dereference before check on non-DSA netdevice events
| * 164936b2fc netfilter: nf_tables: restore set elements when delete set fails
| * a1bd2a38a1 netfilter: nf_tables: fix memleak in map from abort path
| * b907789732 ubifs: ubifs_symlink: Fix memleak of inode->i_link in error path
| * 1428da2f4a Revert "drm/amd/display: Skip Recompute DSC Params if no Stream on Link"
| * d253f71605 drm/rockchip: vop: enable VOP_FEATURE_INTERNAL_RGB on RK3066
| * 907717eea1 btrfs: drop the backref cache during relocation if we commit
| * 4dc6ea8b4d btrfs: relocation: constify parameters where possible
| * 5ae94c6397 btrfs: relocation: return bool from btrfs_should_ignore_reloc_root
| * ce31847f10 ACPI: battery: Fix possible crash when unregistering a battery hook
| * 2deeb3c748 ACPI: battery: Simplify battery hook locking
| * 712d30f9a5 clk: qcom: gcc-sc8180x: Add GPLL9 support
| * 1c723d785a r8169: add tally counter fields added with RTL8125
| * f02fcb7283 r8169: Fix spelling mistake: "tx_underun" -> "tx_underrun"
| * 736da42408 iio: pressure: bmp280: Fix waiting time for BMP3xx configuration
| * ae6724f9f1 iio: pressure: bmp280: Fix regmap for BMP280 device
| * 5da669d9ee iio: pressure: bmp280: Use BME prefix for BME280 specifics
| * b71b2d704a iio: pressure: bmp280: Improve indentation and line wrapping
| * afe335a6c5 iio: pressure: bmp280: Allow multiple chips id per family of devices
| * c059a2661a dt-bindings: clock: qcom: Add GPLL9 support on gcc-sc8180x
| * 65e71cffb8 dt-bindings: clock: qcom: Add missing UFS QREF clocks
| * fc1ed6f791 remoteproc: k3-r5: Delay notification of wakeup event
| * e1df6bbf47 remoteproc: k3-r5: Acquire mailbox handle during probe routine
| * 9eac174a9b media: imx335: Fix reset-gpio handling
| * 2b00bc1d7d media: i2c: imx335: Enable regulator supplies
| * e6be95592c RDMA/mana_ib: use the correct page table index based on hardware page size
| * 6c95c700f2 net: mana: Add support for page sizes other than 4KB on ARM64
| * 86b6cf7e25 net: mana: Enable MANA driver on ARM64 with 4K page size
| * 1f997b1d13 sched: psi: fix bogus pressure spikes from aggregation race
| * c83a80d8b8 lib/buildid: harden build ID parsing logic
| * f941d77962 build-id: require program headers to be right after ELF header
| * a94ec40b94 drm/amd/display: Allow backlight to go below `AMDGPU_DM_DEFAULT_MIN_BACKLIGHT`
| * 54ad9c7608 mm: z3fold: deprecate CONFIG_Z3FOLD
| * 5b981d8335 uprobes: fix kernel info leak via "[uprobes]" vma
| * 24f7989ed2 io_uring/net: harden multishot termination case for recv
| * 3c38faa39e arm64: errata: Expand speculative SSBS workaround once more
| * 9a3e9aab60 arm64: cputype: Add Neoverse-N3 definitions
| * c7e0da7449 i2c: synquacer: Deal with optional PCLK correctly
| * 6109f5319b i2c: synquacer: Remove a clk reference from struct synquacer_i2c
| * 316be4911f i2c: core: Lock address during client device instantiation
| * 4a2be5a728 i2c: create debugfs entry per adapter
| * aac871e493 platform/x86: x86-android-tablets: Fix use after free on platform_device_register() errors
| * 2dbc42f554 platform/x86: x86-android-tablets: Create a platform_device from module_init()
| * ce5ec36799 kconfig: qconf: fix buffer overflow in debug links
| * eebc10e924 cpufreq: intel_pstate: Make hwp_notify_lock a raw spinlock
| * 68d603f467 drm/amd/display: Fix system hang while resume with TBT monitor
| * 487f6450bc drm/amd/display: Add HDR workaround for specific eDP
| * 579a0a84e3 drm/sched: Add locking to drm_sched_entity_modify_sched
| * 451c87d21d drm/i915/gem: fix bitwise and logical AND mixup
| * a8023f8b55 close_range(): fix the logics in descriptor table trimming
| * 310d953167 rtla: Fix the help text in osnoise and timerlat top tools
| * 8b0f0a268d tracing/timerlat: Fix duplicated kthread creation due to CPU online/offline
| * a6e9849063 tracing/timerlat: Fix a race during cpuhp processing
| * a4a05ceffe tracing/timerlat: Drop interface_lock in stop_kthread()
| * 79250decc9 tracing/hwlat: Fix a race during cpuhp processing
| * b8c118c2a2 ceph: fix cap ref leak via netfs init_request
| * e676e4ea76 mac802154: Fix potential RCU dereference issue in mac802154_scan_worker
| * 830c03e58b Bluetooth: hci_event: Align BR/EDR JUST_WORKS paring with LE
| * 2f4e3926bc net: pcs: xpcs: fix the wrong register that was written back
| * 7c5cd531d0 gpio: davinci: fix lazy disable
| * 0f41f383b5 cpufreq: Avoid a bad reference count on CPU node
| * ed87190e9d btrfs: wait for fixup workers before stopping cleaner kthread during umount
| * 521cfe23fb btrfs: send: fix invalid clone operation for file that got its size decreased
| * 7ad0c5868f btrfs: fix a NULL pointer dereference when failed to start a new trasacntion
| * d7674ed0dc ACPI: resource: Add Asus ExpertBook B2502CVA to irq1_level_low_skip_override[]
| * 27ec4a380b ACPI: resource: Add Asus Vivobook X1704VAP to irq1_level_low_skip_override[]
| * c7d10fa7d7 cachefiles: fix dentry leak in cachefiles_open_file()
| * 195e42c9a9 Input: adp5589-keys - fix adp5589_gpio_get_value()
| * 7c3f04223a Input: adp5589-keys - fix NULL pointer dereference
| * cda7d59724 rtc: at91sam9: fix OF node leak in probe() error path
| * 03582f4752 net: stmmac: Fix zero-division error when disabling tc cbs
| * cd9ce830fa tomoyo: fallback to realpath if symlink's pathname does not exist
| * af3122f5fd gso: fix udp gso fraglist segmentation after pull from frag_list
| * 718a752bd7 vrf: revert "vrf: Remove unnecessary RCU-bh critical section"
| * 73328d2af5 iio: magnetometer: ak8975: Fix reading for ak099xx sensors
| * 3374f06f95 smb3: fix incorrect mode displayed for read-only files
| * 472973229c smb: client: use actual path when queryfs
| * 868e843e52 clk: qcom: clk-alpha-pll: Fix CAL_L_VAL override for LUCID EVO PLL
| * 5bdb3cc0cc clk: qcom: gcc-sc8180x: Fix the sdcc2 and sdcc4 clocks freq table
| * ea3a6938cb media: qcom: camss: Fix ordering of pm_runtime_enable
| * c2218a82f7 media: qcom: camss: Remove use_count guard in stop_streaming
| * 8c860f3586 clk: qcom: gcc-sm8250: Do not turn off PCIe GDSCs during gdsc_disable()
| * b0686aedc5 media: venus: fix use after free bug in venus_remove due to race condition
| * 56770d1e01 clk: qcom: gcc-sm8150: De-register gcc_cpuss_ahb_clk_src
| * 7e21770654 clk: samsung: exynos7885: Update CLKS_NR_FSYS after bindings fix
| * 8cf5c85d5e clk: qcom: clk-rpmh: Fix overflow in BCM vote
| * 1229485abf media: uapi/linux/cec.h: cec_msg_set_reply_to: zero flags
| * e6f63d04c0 clk: qcom: gcc-sm8450: Do not turn off PCIe GDSCs during gdsc_disable()
| * 6fa24b41d3 media: sun4i_csi: Implement link validate for sun4i_csi subdev
| * fb2867420e clk: qcom: dispcc-sm8250: use CLK_SET_RATE_PARENT for branch clocks
| * fc71c23958 remoteproc: k3-r5: Fix error handling when power-up failed
| * bd588d5256 clk: rockchip: fix error for unknown clocks
| * eb4df5e36a media: ov5675: Fix power on/off delay timings
| * acc5103a0a aoe: fix the potential use-after-free problem in more places
| * 1587db1130 riscv: Fix kernel stack size when KASAN is enabled
| * 83b39493cd RDMA/mana_ib: use the correct page size for mapping user-mode doorbell page
| * 4ac6371229 i3c: master: svc: Fix use after free vulnerability in svc_i3c_master Driver Due to Race Condition
| * 6b17072c7d NFSD: Fix NFSv4's PUTPUBFH operation
| * f7d8ee9db9 nfsd: map the EBADMSG to nfserr_io to avoid warning
| * 33658acea0 nfsd: fix delegation_blocked() to block correctly for at least 30 seconds
| * be8d32ebfa perf hist: Update hist symbol when updating maps
| * bebb4c2405 perf python: Disable -Wno-cast-function-type-mismatch if present on clang
| * bf0b3b3525 exfat: fix memory leak in exfat_load_bitmap()
| * 9a2585ad17 riscv: define ILLEGAL_POINTER_VALUE for 64bit
| * 8f91116f36 arm64: Subscribe Microsoft Azure Cobalt 100 to erratum 3194386
| * 5f5ec16bd1 arm64: fix selection of HAVE_DYNAMIC_FTRACE_WITH_ARGS
| * d52c5652e7 ocfs2: fix possible null-ptr-deref in ocfs2_set_buffer_uptodate
| * 86a89e75e9 ocfs2: fix null-ptr-deref when journal load failed.
| * 84543da867 ocfs2: remove unreasonable unlock in ocfs2_read_blocks
| * ef76802036 ocfs2: cancel dqi_sync_work before freeing oinfo
| * 637c00e065 ocfs2: reserve space for inline xattr before attaching reflink tree
| * 8d176ca5d9 ocfs2: fix uninit-value in ocfs2_get_block()
| * e7a8010147 ocfs2: fix the la space leak when unmounting an ocfs2 volume
| * e3a9fc1520 mm: krealloc: consider spare memory for __GFP_ZERO
| * fd34962434 jbd2: correctly compare tids with tid_geq function in jbd2_fc_begin_commit
| * 1c62dc0d82 jbd2: stop waiting for space when jbd2_cleanup_journal_tail() returns error
| * 393331e16c resource: fix region_intersects() vs add_memory_driver_managed()
| * b57b53e8ff drm: omapdrm: Add missing check for alloc_ordered_workqueue
| * 0022085f11 of/irq: Support #msi-cells=<0> in of_msi_get_domain
| * d657d28641 of: address: Report error on resource bounds overflow
| * 25b7a67037 drm/rockchip: vop: clear DMA stop bit on RK3066
| * a17dfde577 parisc: Fix stack start for ADDR_NO_RANDOMIZE personality
| * 62f3e58c4e parisc: Allow mmap(MAP_STACK) memory to automatically expand upwards
| * 42451ba0d6 parisc: Fix 64-bit userspace syscall path
| * 89bbc55d6b ext4: mark fc as ineligible using an handle in ext4_xattr_set()
| * c5771f1c48 ext4: use handle to mark fc as ineligible in __track_dentry_update()
| * d13a3558e8 ext4: fix fast commit inode enqueueing during a full journal commit
| * 1552199ace ext4: fix incorrect tid assumption in jbd2_journal_shrink_checkpoint_list()
| * 80dccb81b7 ext4: fix incorrect tid assumption in ext4_wait_for_tail_page_commit()
| * f55ecc58d0 ext4: update orig_path in ext4_find_extent()
| * 9203817ba4 ext4: fix timer use-after-free on failed mount
| * 68a69cf606 ext4: fix double brelse() the buffer of the extents path
| * 8162ee5d94 ext4: aovid use-after-free in ext4_ext_insert_extent()
| * 1b558006d9 ext4: drop ppath from ext4_ext_replay_update_ex() to avoid double-free
| * 93051d16b3 ext4: fix incorrect tid assumption in __jbd2_log_wait_for_space()
| * 5efccdee4a ext4: dax: fix overflowing extents beyond inode size when partially writing
| * 8c762b4e19 ext4: fix incorrect tid assumption in ext4_fc_mark_ineligible()
| * f4308d8ee3 ext4: propagate errors from ext4_find_extent() in ext4_insert_range()
| * 8fe117790b ext4: fix slab-use-after-free in ext4_split_extent_at()
| * a56e5f389d ext4: correct encrypted dentry name hash when not casefolded
| * 2d64e7dada ext4: no need to continue when the number of entries is 1
| * 9cdf65c6c3 ALSA: hda/realtek: Add a quirk for HP Pavilion 15z-ec200
| * 762650cd5e ALSA: hda/realtek: Add quirk for Huawei MateBook 13 KLV-WX9
| * 3624416ab1 ALSA: line6: add hw monitor volume control to POD HD500X
| * 228a8b952c ALSA: usb-audio: Add native DSD support for Luxman D-08u
| * 9d125aab4c ALSA: usb-audio: Add delay quirk for VIVO USB-C HEADSET
| * aba1be9a80 ALSA: core: add isascii() check to card ID generator
| * 633d345684 ALSA: hda/tas2781: Add new quirk for Lenovo Y990 Laptop
| * c923bc8746 drm: Consistently use struct drm_mode_rect for FB_DAMAGE_CLIPS
| * 6e6f89549c drm/mediatek: ovl_adaptor: Add missing of_node_put()
| * 3eff30f2c3 parisc: Fix itlb miss handler for 64-bit programs
| * 9fca08c06a perf/core: Fix small negative period being ignored
| * 888f728d81 power: supply: hwmon: Fix missing temp1_max_alarm attribute
| * 7febcf1174 spi: bcm63xx: Fix missing pm_runtime_disable()
| * f2d0b351e0 spi: bcm63xx: Fix module autoloading
| * 0a42f63607 dt-bindings: clock: exynos7885: Fix duplicated binding
| * ff580d0130 memory: tegra186-emc: drop unused to_tegra186_emc()
| * 028258156f firmware: tegra: bpmp: Drop unused mbox_client_to_bpmp()
| * bf47be5479 ovl: fail if trusted xattrs are needed but caller lacks permission
| * 6fcd6feaf1 rust: sync: require `T: Sync` for `LockedBy::access`
| * d6c159c066 i2c: designware: fix controller is holding SCL low while ENABLE bit is disabled
| * b80dc74c38 i2c: xiic: Fix pm_runtime_set_suspended() with runtime pm enabled
| * 625a77b68c media: i2c: ar0521: Use cansleep version of gpiod_set_value()
| * c0e00163f8 i2c: xiic: Wait for TX empty to avoid missed TX NAKs
| * 7e263fd6ef i2c: qcom-geni: Use IRQF_NO_AUTOEN flag in request_irq()
| * 22a1f8a5b5 i2c: stm32f7: Do not prepare/unprepare clock during runtime suspend/resume
| * 8176d4878e platform/x86: ISST: Fix the KASAN report slab-out-of-bounds bug
| * b8c0aee7c2 Revert "ALSA: hda: Conditionally use snooping for AMD HDMI"
| * 2c74d33dbf selftests: vDSO: fix vdso_config for s390
| * 0fe35c4737 selftests: vDSO: fix ELF hash table entry size for s390x
| * 676727021d powerpc/vdso: Fix VDSO data access when running in a non-root time namespace
| * dfb569762c selftests/mm: fix charge_reserved_hugetlb.sh test
| * b88842a9f1 selftests: vDSO: fix vDSO symbols lookup for powerpc64
| * d3b90ed9a0 selftests: vDSO: fix vdso_config for powerpc
| * 6c8aff2022 selftests: vDSO: fix vDSO name for powerpc
| * 9629c0c3e8 perf: Really fix event_function_call() locking
| * 42cd165b4c perf callchain: Fix stitch LBR memory leaks
| * e2955fbe08 spi: rpc-if: Add missing MODULE_DEVICE_TABLE
| * 106f10fef0 accel/ivpu: Add missing MODULE_FIRMWARE metadata
| * 4019391dfe selftests: breakpoints: use remaining time to check if suspend succeed
| * c2aa410328 spi: s3c64xx: fix timeout counters in flush_fifo
| * dbda70bbe4 selftest: hid: add missing run-hid-tools-tests.sh
| * 494380a4e4 spi: spi-cadence: Fix missing spi_controller_is_target() check
| * 97aa3293db spi: spi-cadence: Fix pm_runtime_set_suspended() with runtime pm enabled
| * 97f76711a9 spi: spi-cadence: Use helper function devm_clk_get_enabled()
| * d6e3898d62 spi: spi-imx: Fix pm_runtime_set_suspended() with runtime pm enabled
| * c2d9f9a783 bpftool: Fix undefined behavior in qsort(NULL, 0, ...)
| * 390b9e54cd iomap: handle a post-direct I/O invalidate race in iomap_write_delalloc_release
| * ad762c5204 bpftool: Fix undefined behavior caused by shifting into the sign bit
| * d43776b907 ext4: fix i_data_sem unlock order in ext4_ind_migrate()
| * 34b2096380 ext4: avoid use-after-free in ext4_ext_show_leaf()
| * 1fe2852720 ext4: ext4_search_dir should return a proper error
| * b111ae42bb bpf: Make the pointer returned by iter next method valid
| * 18f06bacc1 ksmbd: add refcnt to ksmbd_conn struct
| * f5e30a30fc platform/x86: lenovo-ymc: Ignore the 0x0 state
| * 4298813a43 drm/amdgpu/gfx10: use rlc safe mode for soft recovery
| * c20cd3d6d2 drm/amdgpu/gfx11: use rlc safe mode for soft recovery
| * e16a6d1a33 powerpc/pseries: Use correct data types from pseries_hp_errorlog struct
| * fe2c86e192 of/irq: Refer to actual buffer size in of_irq_parse_one()
| * b511474f49 drm/amd/pm: ensure the fw_info is not null before using it
| * 3ffbdc977d drm/amdgpu/gfx9: use rlc safe mode for soft recovery
| * 8361e3f788 drm/amdgpu: Block MMR_READ IOCTL in reset
| * c474a1a755 drm/radeon/r100: Handle unknown family in r100_cp_init_microcode()
| * ee5d547006 scsi: NCR5380: Initialize buffer for MSG IN and STATUS transfers
| * 66a403d89b perf: Fix event_function_call() locking
| * deb78dc859 drm/amdgpu: fix unchecked return value warning for amdgpu_gfx
| * 5e0e1a941e scsi: lpfc: Update PRLO handling in direct attached topology
| * 55119faf5a scsi: aacraid: Rearrange order of struct aac_srb_unit
| * adf290fe43 perf,x86: avoid missing caller address in stack traces captured in uprobe
| * 4ee08b4a72 drm/printer: Allow NULL data in devcoredump printer
| * c7630935d9 drm/amd/display: Initialize get_bytes_per_element's default to 1
| * a1495acc62 drm/amd/display: Avoid overflow assignment in link_dp_cts
| * 929506d567 drm/amd/display: Fix index out of bounds in DCN30 color transformation
| * 122e3a7a8c drm/amd/display: Fix index out of bounds in degamma hardware format translation
| * 0d38a07511 drm/amd/display: Fix index out of bounds in DCN30 degamma hardware format translation
| * be2ca7a2c1 drm/amd/display: Check link_res->hpo_dp_link_enc before using it
| * 42d31a3364 drm/amd/display: Check stream before comparing them
| * fb557a36b0 drm/stm: ltdc: reset plane transparency after plane disable
| * 64f38c08b5 platform/x86: touchscreen_dmi: add nanote-next quirk
| * 651ba62c25 HID: multitouch: Add support for Thinkpad X12 Gen 2 Kbd Portfolio
| * 71cfb54e0f drm/amdkfd: Fix resource leak in criu restore queue
| * fe90214179 drm/amdgpu: enable gfxoff quirk on HP 705G4
| * a3c8cbefce drm/amdgpu: add raven1 gfxoff quirk
| * c076b37462 jfs: Fix uninit-value access of new_ea in ea_buffer
| * 9288a9676c drm/msm/adreno: Assign msm_gpu->pdev earlier to avoid nullptrs
| * 4155dff76a scsi: smartpqi: correct stream detection
| * 7fff9a9f86 jfs: check if leafidx greater than num leaves per dmap tree
| * 4218b31ecc jfs: Fix uaf in dbFreeBits
| * f04925a02e jfs: UBSAN: shift-out-of-bounds in dbFindBits
| * cf6f3ebd63 drm/amd/display: fix double free issue during amdgpu module unload
| * 75839e2365 drm/amd/display: Add null check for 'afb' in amdgpu_dm_plane_handle_cursor_update (v2)
| * 9641bc4adf drm/amd/display: Check null pointers before using dc->clk_mgr
| * 4778982c73 drm/amd/display: Handle null 'stream_status' in 'planes_changed_for_existing_stream'
| * 6ec7c73934 HID: Ignore battery for all ELAN I2C-HID devices
| * 29d2d5eda3 ata: sata_sil: Rename sil_blacklist to sil_quirks
| * 8fcf85196a ata: pata_serverworks: Do not use the term blacklist
| * e47e563c6f drm/amd/display: Add null check for top_pipe_to_program in commit_planes_for_stream
| * 30ceb873cc drm/amdkfd: amdkfd_free_gtt_mem clear the correct pointer
| * 44e4aeaef9 drm/amdgpu: disallow multiple BO_HANDLES chunks in one submit
| * 0a1741d10d drm/stm: Avoid use-after-free issues with crtc and plane
| * dfdbc5ba10 iommu/vt-d: Fix potential lockup if qi_submit_sync called with 0 count
| * 54e86bfec0 iommu/vt-d: Always reserve a domain ID for identity setup
| * 1c36eb1732 power: reset: brcmstb: Do not go into infinite loop if reset fails
| * 2d56271fce rcuscale: Provide clear error when async specified without primitives
| * fdda354f60 fbdev: pxafb: Fix possible use after free in pxafb_task()
| * 36bfefb6ba fbdev: efifb: Register sysfs groups through driver core
| * 4b101d2f40 hwmon: (nct6775) add G15CF to ASUS WMI monitoring list
| * 2de5fd836b x86/syscall: Avoid memcpy() for ia32 syscall_get_arguments()
| * e9df4c6107 selftests/nolibc: avoid passing NULL to printf("%s")
| * fc975b8dab tools/nolibc: powerpc: limit stack-protector workaround to GCC
| * 6cc4e5eaad ALSA: hdsp: Break infinite MIDI input flush loop
| * 7a55740996 ALSA: asihpi: Fix potential OOB array access
| * ddd52c9fe9 x86/kexec: Add EFI config table identity mapping for kexec kernel
| * 407abc7e0c x86/pkeys: Restore altstack access in sigreturn()
| * 1905912820 x86/pkeys: Add PKRU as a parameter in signal handling functions
| * ef6c1ed588 tools/x86/kcpuid: Protect against faulty "max subleaf" values
| * 71faa656b8 ASoC: codecs: wsa883x: Handle reading version failure
| * 70d5e30b0a ALSA: usb-audio: Add logitech Audio profile quirk
| * fb2ed616af ALSA: usb-audio: Replace complex quirk lines with macros
| * 0bf9779cd9 ALSA: usb-audio: Define macros for quirk table entries
| * 077e1b7cd5 x86/ioapic: Handle allocation failures gracefully
| * 864f68a242 ALSA: usb-audio: Add input value sanity checks for standard types
| * f888741fcf nfp: Use IRQF_NO_AUTOEN flag in request_irq()
| * fef7b51f22 wifi: mwifiex: Fix memcpy() field-spanning write warning in mwifiex_cmd_802_11_scan_ext()
| * 0a630d690b wifi: mt76: mt7915: hold dev->mt76.mutex while disabling tx worker
| * 833ebae266 wifi: mt76: mt7915: add dummy HW offload of IEEE 802.11 fragmentation
| * b4f8240bc3 can: netlink: avoid call to do_set_data_bittiming callback with stale can_priv::ctrlmode
| * b017f4f670 drivers/perf: arm_spe: Use perf_allow_kernel() for permissions
| * 8552508033 proc: add config & param to block forcing mem writes
| * 8b2906e134 ACPICA: iasl: handle empty connection_node
| * f373196093 wifi: mac80211: fix RCU list iterations
| * 6dcadb2ed3 wifi: iwlwifi: mvm: avoid NULL pointer dereference
| * 3241162554 wifi: iwlwifi: mvm: use correct key iteration
| * 5cce1c07bf tcp: avoid reusing FIN_WAIT2 when trying to find port in connect() process
| * 27fe713c62 netpoll: Ensure clean state on setup failures
| * b60d2bc676 crypto: simd - Do not call crypto_alloc_tfm during registration
| * 0f6dab0b79 net: atlantic: Avoid warning about potential string truncation
| * f989162f55 ipv4: Mask upper DSCP bits and ECN bits in NETLINK_FIB_LOOKUP family
| * 239ac7faea wifi: rtw89: correct base HT rate mask for firmware
| * d4c4653b60 ipv4: Check !in_dev earlier for ioctl(SIOCSIFADDR).
| * 0d6255e512 bnxt_en: Extend maximum length of version string by 1 byte
| * 74834f4a6c net: mvpp2: Increase size of queue_name buffer
| * 12d26aa7fd tipc: guard against string buffer overrun
| * 4588ea78d3 ACPICA: check null return of ACPI_ALLOCATE_ZEROED() in acpi_db_convert_to_package()
| * 93d065b704 ACPI: EC: Do not release locks during operation region accesses
| * 90ec583a85 wifi: rtw88: select WANT_DEV_COREDUMP
| * 7a552bc2f3 wifi: ath11k: fix array out-of-bound access in SoC stats
| * d0e4274d9d wifi: ath12k: fix array out-of-bound access in SoC stats
| * 1ab2cfe197 blk_iocost: fix more out of bound shifts
| * 29dbea4c56 ACPI: CPPC: Add support for setting EPP register in FFH
| * 716dae9686 ACPI: video: Add force_vendor quirk for Panasonic Toughbook CF-18
| * cc026a7f9b Bluetooth: btrtl: Set msft ext address filter quirk for RTL8852B
| * 18ed567ad0 Bluetooth: btusb: Add Realtek RTL8852C support ID 0x0489:0xe122
| * 37a6fc0d8f net: sched: consistently use rcu_replace_pointer() in taprio_change()
| * 3f5625e9e9 wifi: mt76: mt7915: disable tx worker during tx BA session enable/disable
| * 1c6db07811 e1000e: avoid failing the system during pm_suspend
| * 13ca2b3568 ACPICA: Fix memory leak if acpi_ps_get_next_field() fails
| * 0b02303431 ACPICA: Fix memory leak if acpi_ps_get_next_namepath() fails
| * 68a8e45743 ACPI: PAD: fix crash in exit_round_robin()
| * 0a94777ba4 net: hisilicon: hns_mdio: fix OF node leak in probe()
| * 359a218ce1 net: hisilicon: hns_dsaf_mac: fix OF node leak in hns_mac_get_info()
| * f62bf4ffeb net: hisilicon: hip04: fix OF node leak in probe()
| * 143edf098b net/xen-netback: prevent UAF in xenvif_flush_hash()
| * 04053e55dd wifi: cfg80211: Set correct chandef when starting CAC
| * d76360adab wifi: iwlwifi: mvm: drop wrong STA selection in TX
| * 191e8d5256 wifi: iwlwifi: mvm: Fix a race in scan abort flow
| * 82465e05ca ice: Adjust over allocation of memory in ice_sched_add_root_node() and ice_sched_add_node()
| * 21ba7132a9 crypto: octeontx2 - Fix authenc setkey
| * 0ac97b001c crypto: octeontx - Fix authenc setkey
| * 6300199be3 crypto: x86/sha256 - Add parentheses around macros' single arguments
| * e37e348835 wifi: ath9k_htc: Use __skb_set_length() for resetting urb before resubmit
| * fdc73f2cfb wifi: rtw89: avoid to add interface to list twice when SER
| * e6e4cfb5f6 wifi: ath9k: fix possible integer overflow in ath9k_get_et_stats()
| * 40346cbb19 ALSA: hda/conexant: Fix conflicting quirk for System76 Pangolin
| * b0f3c6a2d0 ALSA: gus: Fix some error handling paths related to get_bpos() usage
| * 2c3c1f87cf cifs: Do not convert delimiter when parsing NFS-style symlinks
| * c6db81c550 cifs: Fix buffer overflow when parsing NFS reparse points
| * 92e71ccd8f ASoC: imx-card: Set card.owner to avoid a warning calltrace if SND=m
| * f8f081578b ALSA: hda/generic: Unconditionally prefer preferred_dacs pairs
| * e4c886dd24 cifs: Remove intermediate object of failed create reparse call
| * fa72abf31b ALSA: hda/realtek: Fix the push button function for the ALC257
| * 466129e3d0 ALSA: mixer_oss: Remove some incorrect kfree_const() usages
| * 0152c81f61 ASoC: atmel: mchp-pdmc: Skip ALSA restoration if substream runtime is uninitialized
| * 28234f8ab6 Bluetooth: L2CAP: Fix not validating setsockopt user input
| * 6a6baa1ee7 Bluetooth: ISO: Fix not validating setsockopt user input
| * dea46e246e media: usbtv: Remove useless locks in usbtv_video_free()
| * 0c18a64039 Bluetooth: hci_sock: Fix not validating setsockopt user input
| * b66ff9a3fc loop: don't set QUEUE_FLAG_NOMERGES
| * 3000f3a86d i2c: xiic: Try re-initialization on bus busy timeout
| * 7c48b5a6c3 i2c: xiic: improve error message when transfer fails to start
| * 7f64cb5b4d sctp: set sk_state back to CLOSED if autobind fails in sctp_listen_start
| * 25a54df408 dt-bindings: net: xlnx,axi-ethernet: Add missing reg minItems
| * 1372c7579e iomap: constrain the file range passed to iomap_file_unshare
| * f9620e2a66 ppp: do not assume bh is held in ppp_channel_bridge_input()
| * d9dfd41e32 net: test for not too small csum_start in virtio_net_hdr_to_skb()
| * ea8cad4ca5 ipv4: ip_gre: Fix drops of small packets in ipgre_xmit
| * a9ad307c0d net: stmmac: dwmac4: extend timeout for VLAN Tag register busy bit check
| * 9b0ee571d2 net: add more sanity checks to qdisc_pkt_len_init()
| * 25ab0b87db net: avoid potential underflow in qdisc_pkt_len_init() with UFO
| * da14324002 net: fec: Reload PTP registers after link-state change
| * dc5fb26416 net: fec: Restart PPS after link state change
| * e66e38d07b net: ethernet: lantiq_etop: fix memory disclosure
| * 718b663403 net: Fix gso_features_check to check for both dev->gso_{ipv4_,}max_size
| * dae9b99bd2 net: Add netif_get_gro_max_size helper for GRO
| * f0a84ad84d Bluetooth: btmrvl: Use IRQF_NO_AUTOEN flag in request_irq()
| * b90907696c Bluetooth: L2CAP: Fix uaf in l2cap_connect
| * 4883296505 Bluetooth: MGMT: Fix possible crash on mgmt_index_removed
| * 4e3542f40f netfilter: nf_tables: prevent nf_skb_duplicated corruption
| * fe9ccbf1b7 selftests: netfilter: Fix nft_audit.sh for newer nft binaries
| * 271b490472 net: wwan: qcom_bam_dmux: Fix missing pm_runtime_disable()
| * 96858258de net: ieee802154: mcr20a: Use IRQF_NO_AUTOEN flag in request_irq()
| * 8691a82abf netfilter: uapi: NFTA_FLOWTABLE_HOOK is NLA_NESTED
| * fab615ac9f net/mlx5e: Fix crash caused by calling __xfrm_state_delete() twice
| * 0168ab6fbd net/mlx5e: Fix NULL deref in mlx5e_tir_builder_alloc()
| * 1c252d6465 net/mlx5: Added cond_resched() to crdump collection
| * 26fad69b34 net/mlx5: Fix error path in multi-packet WQE transmit
| * 70db858273 net: sparx5: Fix invalid timestamps
| * d6c4c08670 ieee802154: Fix build error
| * 11ab19d48a ceph: remove the incorrect Fw reference check when dirtying pages
| * 10a58555e0 mailbox: bcm2835: Fix timeout during suspend mode
| * b372b484d2 mailbox: rockchip: fix a typo in module autoloading
| * 7879ad0aa9 drm/amdgpu: Fix get each xcp macro
| * f42595fb8f scsi: pm8001: Do not overwrite PCI queue mapping
| * 6b63cda2d4 scsi: st: Fix input/output error on empty drive reset
| * 86fdd18064 jump_label: Fix static_key_slow_dec() yet again
| * 33f3e83227 jump_label: Simplify and clarify static_key_fast_inc_cpus_locked()
| * e67534bd31 static_call: Replace pointless WARN_ON() in static_call_module_notify()
| * c0abbbe8c9 static_call: Handle module init failure correctly in static_call_del_module()
* | 9cb3cefe51 ANDROID: add __pskb_copy_fclone to db845c symbol list.
* | c03ecccaad Revert "bpf: Fix helper writes to read-only maps"
* | d930352374 Merge 6.6.54 into android15-6.6-lts
|\|
| * 63a57420cf Linux 6.6.54
| * cada2646b7 Revert: "dm-verity: restart or panic on an I/O error"
| * 646749b423 spi: atmel-quadspi: Fix wrong register value written to MR
| * 4c0c5dcb54 x86/tdx: Fix "in-kernel MMIO" check
| * 440fba897c thunderbolt: Fix NULL pointer dereference in tb_port_update_credits()
| * e2ab9fd64d thunderbolt: Fix minimum allocated USB 3.x and PCIe bandwidth
| * 3dc5525d59 thunderbolt: Send uevent after asymmetric/symmetric switch
| * 6b5630297e wifi: brcmfmac: add linefeed at end of file
| * 72a3aef964 iio: magnetometer: ak8975: Fix 'Unexpected device' error
| * 18b5ee7bf7 perf/arm-cmn: Fail DTC counter allocation correctly
| * e43caacf61 usb: yurex: Fix inconsistent locking bug in yurex_read()
| * 790c630ab0 bpf: Fix use-after-free in bpf_uprobe_multi_link_attach()
| * 7390c46126 Documentation: KVM: fix warning in "make htmldocs"
| * d669e78290 i2c: isch: Add missed 'else'
| * 88dfb1dd17 i2c: aspeed: Update the stop sw state when the bus recovery occurs
| * b35a42bdaf mm/damon/vaddr: protect vma traversal in __damon_va_thre_regions() with rcu read lock
| * 6ec62dba4a module: Fix KCOV-ignored file name
| * 236eb2f95a spi: fspi: add support for imx8ulp
| * 9347605691 mm: only enforce minimum stack gap size if it's sensible
| * e1e734c1a0 lockdep: fix deadlock issue between lockdep and rcu
| * bd24f30f50 dm-verity: restart or panic on an I/O error
| * b3c10ac84c bpf: lsm: Set bpf_lsm_blob_sizes.lbs_task to 0
| * 722e9e5acc mm/filemap: optimize filemap folio adding
| * 734594d41c lib/xarray: introduce a new helper xas_get_order
| * ff3c557fa9 mm/filemap: return early if failed to allocate memory for split
| * 4d0261cea4 thunderbolt: Improve DisplayPort tunnel setup process to be more robust
| * aed38a3eaf thunderbolt: Configure asymmetric link if needed and bandwidth allows
| * 9b6933e9bd thunderbolt: Add support for asymmetric link
| * 8f053095e1 thunderbolt: Introduce tb_switch_depth()
| * e07bc5858e thunderbolt: Introduce tb_for_each_upstream_port_on_path()
| * 18dcdadc99 thunderbolt: Introduce tb_port_path_direction_downstream()
| * 5ac89bb006 thunderbolt: Change bandwidth reservations to comply USB4 v2
| * 7b85d75108 thunderbolt: Make is_gen4_link() available to the rest of the driver
| * 22081f7207 thunderbolt: Use weight constants in tb_usb3_consumed_bandwidth()
| * c014f37411 thunderbolt: Use constants for path weight and priority
| * ae2d54f5e5 thunderbolt: Create multiple DisplayPort tunnels if there are more DP IN/OUT pairs
| * 6870e5b499 thunderbolt: Expose tb_tunnel_xxx() log macros to the rest of the driver
| * 95f53ccfe6 thunderbolt: Use tb_tunnel_dbg() where possible to make logging more consistent
| * 90135c317d thunderbolt: Fix debug log when DisplayPort adapter not available for pairing
| * 159b1b4530 dt-bindings: spi: nxp-fspi: add imx8ulp support
| * eb95bd9646 dt-bindings: spi: nxp-fspi: support i.MX93 and i.MX95
| * f56a6d9c26 btrfs: fix race setting file private on concurrent lseek using same fd
| * 971d03cd45 btrfs: update comment for struct btrfs_inode::lock
| * a0cc053ba1 btrfs: reorder btrfs_inode to fill gaps
| * 0131bf19a1 btrfs: subpage: fix the bitmap dump which can cause bitmap corruption
| * 459b724c3c lib/bitmap: add bitmap_{read,write}()
| * 32e93cae4d x86/entry: Remove unwanted instrumentation in common_interrupt()
| * d5c5afdb9e x86/idtentry: Incorporate definitions/declarations of the FRED entries
| * 1d8c1add5e serial: don't use uninitialized value in uart_poll_init()
| * 88e26a196a tty: serial: kgdboc: Fix 8250_* kgdb over serial
| * 73c1928a00 pps: add an error check in parport_attach
| * 8b48ea2718 pps: remove usage of the deprecated ida_simple_xx() API
| * aafeabf276 usb: xhci: fix loss of data on Cadence xHC
| * eef5d6219a xhci: Add a quirk for writing ERST in high-low order
| * 225643310d USB: misc: yurex: fix race between read and write
| * eff6dde4c3 usb: yurex: Replace snprintf() with the safer scnprintf() variant
| * 8526ca3bc8 soc: versatile: realview: fix soc_dev leak during device remove
| * c48d5ad1c4 soc: versatile: realview: fix memory leak during device remove
| * f6bda3f118 ARM: dts: imx6ul-geam: fix fsl,pins property in tscgrp pinctrl
| * 45f690fae4 spi: fspi: involve lut_num for struct nxp_fspi_devtype_data
| * 1b8cf11b3c padata: use integer wrap around to prevent deadlock on seq_nr overflow
| * 62004f1703 cpuidle: riscv-sbi: Use scoped device node handling to fix missing of_node_put
| * 662ec52260 icmp: change the order of rate limits
| * e0be8f2d64 EDAC/igen6: Fix conversion of system address to physical memory address
| * 2a4a997adb nfs: fix memory leak in error path of nfs4_do_reclaim
| * 4d3d0869ec fs: Fix file_set_fowner LSM hook inconsistencies
| * 0eed942bc6 vfs: fix race between evice_inodes() and find_inode()&iput()
| * ca2a69fdd6 arm64: dts: rockchip: Correct the Pinebook Pro battery design capacity
| * eea02200cb arm64: dts: qcom: sa8775p: Mark APPS and PCIe SMMUs as DMA coherent
| * 4fff20cff6 arm64: dts: rockchip: Raise Pinebook Pro's panel backlight PWM frequency
| * 0e6774ec01 arm64: errata: Enable the AC03_CPU_38 workaround for ampere1a
| * 93e1215f3f arm64: esr: Define ESR_ELx_EC_* constants as UL
| * 1b4089d567 hwrng: cctrng - Add missing clk_disable_unprepare in cctrng_resume
| * 3fd8e444e8 hwrng: bcm2835 - Add missing clk_disable_unprepare in bcm2835_rng_init
| * 5ad4d0b648 hwrng: mtk - Use devm_pm_runtime_enable
| * 7cb51731f2 f2fs: fix to check atomic_file in f2fs ioctl interfaces
| * 5e0de753bf f2fs: Require FMODE_WRITE for atomic write ioctls
| * 56d8651679 f2fs: avoid potential int overflow in sanity_check_area_boundary()
| * 0c598a0217 f2fs: prevent possible int overflow in dir_block_index()
| * b18a5c8382 f2fs: fix several potential integer overflows in file offsets
| * 4adf651494 btrfs: always update fstrim_range on failure in FITRIM ioctl
| * 6a6a5751c0 btrfs: tree-checker: fix the wrong output of data backref objectid
| * 534230eeba debugobjects: Fix conditions in fill_pool()
| * c1ba1f2ca1 wifi: mt76: mt7615: check devm_kasprintf() returned value
| * eed8db8203 wifi: rtw88: 8822c: Fix reported RX band width
| * de0cb07dc2 wifi: rtw88: 8821cu: Remove VID/PID 0bda:c82c
| * 8e4b60ae8a wifi: mt76: mt7996: fix NULL pointer dereference in mt7996_mcu_sta_bfer_he
| * cf23427dd7 wifi: mt76: mt7915: check devm_kasprintf() returned value
| * 0a74a9b148 wifi: mt76: mt7921: Check devm_kasprintf() returned value
| * cb0125ec3d perf/x86/intel/pt: Fix sampling synchronization
| * 19fd2f2c5f efistub/tpm: Use ACPI reclaim memory for event log to avoid corruption
| * ca659f3804 ACPI: resource: Add another DMI match for the TongFang GMxXGxx
| * f0921ecd4d ACPI: sysfs: validate return type of _STR method
| * df6a82a6b0 drbd: Add NULL check for net_conf to prevent dereference in state validation
| * 42ac42d790 drbd: Fix atomicity violation in drbd_uuid_set_bm()
| * a3028d70a5 crypto: ccp - Properly unregister /dev/sev on sev PLATFORM_STATUS failure
| * 633bd1d6be serial: qcom-geni: fix fifo polling timeout
| * e29a1f8b74 xhci: Set quirky xHC PCI hosts to D3 _after_ stopping and freeing them.
| * f7ba350f4e tty: rp2: Fix reset with non forgiving PCIe host bridges
| * 7420c1bf7f firmware_loader: Block path traversal
| * 18ed6a3318 bus: mhi: host: pci_generic: Fix the name for the Telit FE990A
| * 3ae13d4868 bus: integrator-lm: fix OF node leak in probe()
| * 4f7908ebaf usb: dwc2: drd: fix clock gating on USB role switch
| * 19fb05d2e5 usb: cdnsp: Fix incorrect usb_request status
| * a0b4cbeb09 USB: class: CDC-ACM: fix race between get_serial and set_serial
| * 7bcd961dcb USB: misc: cypress_cy7c63: check for short transfer
| * ef08eb1605 USB: appledisplay: close race between probe and completion handler
| * 090386dbed arm64: dts: mediatek: mt8195-cherry: Mark USB 3.0 on xhci1 as disabled
| * 1e44ee6cdd usbnet: fix cyclical race on disconnect with work queue
| * d71300d07f wifi: rtw88: Fix USB/SDIO devices not transmitting beacons
| * 9ecd9d7ad7 can: esd_usb: Remove CAN_CTRLMODE_3_SAMPLES for CAN-USB/3-FD
| * ccc87864b0 scsi: mac_scsi: Disallow bus errors during PDMA send
| * 0120c7762f scsi: mac_scsi: Refactor polling loop
| * 6e8dc2050a scsi: mac_scsi: Revise printk(KERN_DEBUG ...) messages
| * 09b06c2591 scsi: ufs: qcom: Update MODE_MAX cfg_bw value
| * 568c7c4c77 scsi: sd: Fix off-by-one error in sd_read_block_characteristics()
| * facf1e49a0 ata: libata-scsi: Fix ata_msense_control() CDL page reporting
| * 6ab95e27b7 ksmbd: handle caseless file creation
| * 30fe2a885c ksmbd: allow write with FILE_APPEND_DATA
| * 3c1fd66a19 ksmbd: make __dir_empty() compatible with POSIX
| * ef83620438 fs: Create a generic is_dot_dotdot() utility
| * ae619de500 powerpc/atomic: Use YZ constraints for DS-form instructions
| * a3765b497a KEYS: prevent NULL pointer dereference in find_asymmetric_key()
| * c886061bbd drm/amd/display: Validate backlight caps are sane
| * 9ce1ee22dc drm/amd/display: Round calculated vtotal
| * 55fcbe5f60 drm/amd/display: Add HDMI DSC native YCbCr422 support
| * a53841b074 drm/amd/display: Skip Recompute DSC Params if no Stream on Link
| * 4777225ec8 KVM: Use dedicated mutex to protect kvm_usage_count to avoid deadlock
| * beef3353c6 KVM: x86: Move x2APIC ICR helper above kvm_apic_write_nodecode()
| * 7eae461dc3 KVM: x86: Enforce x2APIC's must-be-zero reserved ICR bits
| * d5d6489b92 KVM: arm64: Add memory length checks and remove inline in do_ffa_mem_xfer
| * 0188ea5fac Input: i8042 - add another board name for TUXEDO Stellaris Gen5 AMD line
| * 09d94ac8b2 Input: i8042 - add TUXEDO Stellaris 15 Slim Gen6 AMD to i8042 quirk table
| * c18dca92da Input: i8042 - add TUXEDO Stellaris 16 Gen5 AMD to i8042 quirk table
| * 2a26c3122d Input: adp5588-keys - fix check on return code
| * cd6dd564ae iommufd: Protect against overflow of ALIGN() during iova allocation
| * e48edd4762 Revert "media: tuners: fix error return code of hybrid_tuner_request_state()"
| * a4c2fbed20 soc: versatile: integrator: fix OF node leak in probe() error path
| * c3533bf2ed soc: fsl: cpm1: tsa: Fix tsa_write8()
| * 543a3c7dbd ASoC: rt5682: Return devm_of_clk_add_hw_provider to transfer the error
| * 513d60f419 Revert "soc: qcom: smd-rpm: Match rpmsg channel instead of compatible"
| * 02a370c4fc PCI: xilinx-nwl: Fix off-by-one in INTx IRQ handler
| * 3d8573abdc PCI: Use an error code with PCIe failed link retraining
| * a200897dc7 PCI: Correct error reporting with PCIe failed link retraining
| * f23785c6e7 PCI: imx6: Fix missing call to phy_power_off() in error handling
| * b91d041e07 PCI: dra7xx: Fix threaded IRQ request for "dra7xx-pcie-main" IRQ
| * 894f21117f PCI: Clear the LBMS bit after a link retrain
| * fb17695735 PCI: Revert to the original speed after PCIe failed link retraining
| * 38dee6edb7 Remove *.orig pattern from .gitignore
| * 01ad0576f0 io_uring/sqpoll: do not put cpumask on stack
| * 859f62a2f9 io_uring/sqpoll: retain test for whether the CPU is valid
| * adbb44539b xen: allow mapping ACPI data using a different physical address
| * 161fd69123 xen: move checks for e820 conflicts further up
| * 79fec62d0f Revert "net: libwx: fix alloc msix vectors failed"
| * 0851b1ec65 drm/vmwgfx: Prevent unmapping active read buffers
| * b5d38f1d4a drm/amd/display: Fix Synaptics Cascaded Panamera DSC Determination
| * 49d3a4ad57 mm: call the security_mmap_file() LSM hook in remap_file_pages()
| * 4bdf75c2ef io_uring: check for presence of task_work rather than TIF_NOTIFY_SIGNAL
| * 358124ba2c io_uring/sqpoll: do not allow pinning outside of cpuset
| * da2bb8e177 netfilter: nf_tables: use rcu chain hook list iterator from netlink dump path
| * b3f7607f20 netfilter: ctnetlink: compile ctnetlink_label_size with CONFIG_NF_CONNTRACK_EVENTS
| * 668f4df6d6 netfilter: nf_tables: Keep deleted flowtable hooks until after RCU
| * 3e8ac2743d net: stmmac: set PP_FLAG_DMA_SYNC_DEV only if XDP is enabled
| * e9e3424d6d virtio_net: Fix mismatched buf address when unmapping for small packets
| * ccd3e6ff05 bonding: Fix unnecessary warnings and logs from bond_xdp_get_xmit_slave()
| * 00a0c2d49b net: qrtr: Update packets cloning when broadcasting
| * 570f7d8c9b tcp: check skb is non-NULL in tcp_rto_delta_us()
| * 88297d3c1a net: ipv6: select DST_CACHE from IPV6_RPL_LWTUNNEL
| * d2abc37907 net: seeq: Fix use after free vulnerability in ether3 Driver Due to Race Condition
| * af4b8a704f netfilter: nf_reject_ipv6: fix nf_reject_ip6_tcphdr_put()
| * 89bab8310a net: xilinx: axienet: Fix packet counting
| * bcce13930b net: xilinx: axienet: Schedule NAPI in two steps
| * 9360d077d3 Revert "dm: requeue IO if mapping table not yet available"
| * 66e78ade97 ep93xx: clock: Fix off by one in ep93xx_div_recalc_rate()
| * ca64edd7ae vhost_vdpa: assign irq bypass producer token correctly
| * 70a180b8d8 cxl/pci: Fix to record only non-zero ranges
| * c16fa6d501 interconnect: icc-clk: Add missed num_nodes initialization
| * 257c7a3909 coresight: tmc: sg: Do not leak sg_table
| * 5060a1be93 serial: 8250: omap: Cleanup on error in request_irq
| * b8e45b9105 driver core: Fix a potential null-ptr-deref in module_add_driver()
| * fdc637d4f5 dt-bindings: iio: asahi-kasei,ak8975: drop incorrect AK09116 compatible
| * 7387270b68 iio: magnetometer: ak8975: drop incorrect AK09116 compatible
| * c5a4a27666 iio: magnetometer: ak8975: Convert enum->pointer for data in the match tables
| * 2bc96d4ea9 iio: chemical: bme680: Fix read/write ops to device by adding mutexes
| * 5d86a29db8 ABI: testing: fix admv8818 attr description
| * dd69fb026c driver core: Fix error handling in driver API device_rename()
| * 0f115888ea iio: adc: ad7606: fix standby gpio state to match the documentation
| * 4861770740 iio: adc: ad7606: fix oversampling gpio array
| * 30b9bf4b41 nvme-multipath: system fails to create generic nvme device
| * ecb8a79d21 spi: atmel-quadspi: Avoid overwriting delay register settings
| * 54fd87259c lib/sbitmap: define swap_lock as raw_spinlock_t
| * 93773e4461 spi: spi-fsl-lpspi: Undo runtime PM changes at driver exit time
| * 2016d58567 spi: atmel-quadspi: Undo runtime PM changes at driver exit time
| * 649ec8b30d f2fs: fix to don't set SB_RDONLY in f2fs_handle_critical_error()
| * f9ce2f550d f2fs: get rid of online repaire on corrupted directory
| * 66b1b8254d f2fs: clean up w/ dotdot_name
| * 364afd8aa8 f2fs: prevent atomic file from being dirtied before commit
| * b6f186bd6a f2fs: compress: don't redirty sparse cluster during {,de}compress
| * 4263b3ef81 f2fs: compress: do sanity check on cluster when CONFIG_F2FS_CHECK_FS is on
| * fc18e655b6 f2fs: fix to avoid use-after-free in f2fs_stop_gc_thread()
| * f2971778b2 f2fs: support .shutdown in f2fs_sops
| * 783b6ca342 f2fs: atomic: fix to truncate pagecache before on-disk metadata truncation
| * 1bb0686a2e f2fs: fix to wait page writeback before setting gcing flag
| * 87f9d26fcc f2fs: Create COW inode from parent dentry for atomic write
| * 67c3c4638f f2fs: fix to avoid racing in between read and OPU dio write
| * 6c59f87e1e f2fs: reduce expensive checkpoint trigger frequency
| * d889928bbc f2fs: atomic: fix to avoid racing w/ GC
| * 8edf3a4038 crypto: powerpc/p10-aes-gcm - Disable CRYPTO_AES_GCM_P10
| * 21b4fa3bff crypto: caam - Pad SG length when allocating hash edesc
| * 318f70857c nfsd: return -EINVAL when namelen is 0
| * a1afbbb527 nfsd: call cache_put if xdr_reserve_space returns NULL
| * b743922b5a ntb: Force physically contiguous allocation of rx ring buffers
| * fd8932cf6b ntb_perf: Fix printk format
| * 16e5bed6c1 ntb: intel: Fix the NULL vs IS_ERR() bug for debugfs_create_dir()
| * b15dd2aa79 RDMA/irdma: fix error message in irdma_modify_qp_roce()
| * 0d50ae281a RDMA/cxgb4: Added NULL check for lookup_atid
| * 21ada6915c riscv: Fix fp alignment bug in perf_callchain_user()
| * 6eff336b10 RDMA/mlx5: Obtain upper net device only when needed
| * e8721e9ba1 RDMA/hns: Fix restricted __le16 degrades to integer issue
| * b3b7ff0767 RDMA/hns: Optimize hem allocation performance
| * 288ecfd3e8 RDMA/hns: Fix 1bit-ECC recovery address in non-4K OS
| * 3ab289914e RDMA/hns: Fix VF triggering PF reset in abnormal interrupt handler
| * 094a182190 RDMA/hns: Fix spin_unlock_irqrestore() called with IRQs enabled
| * 69d9566822 RDMA/hns: Fix the overflow risk of hem_list_calc_ba_range()
| * d2d9c51271 RDMA/hns: Fix Use-After-Free of rsv_qp on HIP08
| * 85e37ac139 RDMA/hns: Don't modify rq next block addr in HIP09 QPC
| * b972bade15 watchdog: imx_sc_wdt: Don't disable WDT in suspend
| * 613a8d27d1 RDMA/mlx5: Limit usage of over-sized mkeys from the MR cache
| * 7838f6c8a6 RDMA/erdma: Return QP state in erdma_query_qp
| * 95248d7497 PCI: kirin: Fix buffer overflow in kirin_pcie_parse_port()
| * d08754be99 IB/core: Fix ib_cache_setup_one error flow cleanup
| * 4c49d34f87 pinctrl: mvebu: Fix devinit_dove_pinctrl_probe function
| * a685bc3524 nfsd: fix refcount leak when file is unhashed after being found
| * 982dfdfd59 nfsd: remove unneeded EEXIST error check in nfsd_do_file_acquire
| * 6ba2624779 clk: rockchip: rk3588: Fix 32k clock name for pmu_24m_32k_100m_src_p
| * 521d101e9e clk: starfive: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync() usage
| * 8758691ea8 clk: ti: dra7-atl: Fix leak of of_nodes
| * 01b9be936e RDMA/rtrs-clt: Reset cid to con_num - 1 to stay in bounds
| * effc10f00c RDMA/rtrs: Reset hb_missed_cnt after receiving other traffic from peer
| * c6b9f971b4 media: mediatek: vcodec: Fix H264 stateless decoder smatch warning
| * dbe5b73738 media: mediatek: vcodec: Fix VP8 stateless decoder smatch warning
| * 588bcce9e6 media: mediatek: vcodec: Fix H264 multi stateless decoder smatch warning
| * 08d13bcb9c clk: at91: sama7g5: Allocate only the needed amount of memory for PLLs
| * b6edb3fd96 pinctrl: single: fix missing error code in pcs_probe()
| * 8b7df76356 RDMA/iwcm: Fix WARNING:at_kernel/workqueue.c:#check_flush_dependency
| * 451249bb8d media: platform: rzg2l-cru: rzg2l-csi2: Add missing MODULE_DEVICE_TABLE
| * 4f201a94ac PCI: xilinx-nwl: Clean up clock on probe failure/removal
| * f1058b0780 PCI: xilinx-nwl: Fix register misspelling
| * 18a672c62d nvdimm: Fix devs leaks in scan_labels()
| * e39cc0c37d x86/PCI: Check pcie_find_root_port() return for NULL
| * 597c72f4d1 leds: pca995x: Fix device child node usage in pca995x_probe()
| * d14451d91a leds: pca995x: Use device_for_each_child_node() to access device child nodes
| * dbba3fce3e leds: leds-pca995x: Add support for NXP PCA9956B
| * 583314ebaa clk: qcom: dispcc-sm8250: use special function for Lucid 5LPE PLL
| * 4ddb580089 clk: qcom: ipq5332: Register gcc_qdss_tsctr_clk_src
| * e85ab50788 PCI: keystone: Fix if-statement expression in ks_pcie_quirk()
| * 8e152448d0 firewire: core: correct range of block for case of switch statement
| * 390de4d01b PCI: Wait for Link before restoring Downstream Buses
| * 58f31be7df drivers: media: dvb-frontends/rtl2830: fix an out-of-bounds write error
| * 527ab3eb3b drivers: media: dvb-frontends/rtl2832: fix an out-of-bounds write error
| * 075a0ce1fa Input: ilitek_ts_i2c - add report id message validation
| * 831886bf1a Input: ilitek_ts_i2c - avoid wrong input subsystem sync
| * a3552e2f7d pinctrl: ti: ti-iodelay: Fix some error handling paths
| * 85427d5109 pinctrl: ti: iodelay: Use scope based of_node_put() cleanups
| * ccc7cdf496 pinctrl: Use device_get_match_data()
| * a12e8a9290 pinctrl: ti: ti-iodelay: Convert to platform remove callback returning void
| * bbf297b4cd leds: bd2606mvv: Fix device child node usage in bd2606mvv_probe()
| * 676bf8fcf3 clk: qcom: dispcc-sm8550: use rcg2_shared_ops for ESC RCGs
| * ffb0ae195b clk: qcom: dispcc-sm8650: Update the GDSC flags
| * 65a25e42a4 clk: qcom: dispcc-sm8550: use rcg2_ops for mdss_dptx1_aux_clk_src
| * 59938d4f05 clk: qcom: dispcc-sm8550: fix several supposed typos
| * 77c859e8b8 clk: rockchip: Set parent rate for DCLK_VOP clock on RK3228
| * d271e66f74 remoteproc: imx_rproc: Initialize workqueue earlier
| * 2941577c76 remoteproc: imx_rproc: Correct ddr alias for i.MX8M
| * af70d9395d clk: imx: imx8qxp: Parent should be initialized earlier than the clock
| * d64513b2da clk: imx: imx8qxp: Register dc0_bypass0_clk before disp clk
| * 5b44298953 clk: imx: imx8mp: fix clock tree update of TF-A managed clocks
| * 908165b5d3 clk: imx: fracn-gppll: fix fractional part of PLL getting lost
| * ed323659a0 clk: imx: composite-7ulp: Check the PCC present bit
| * c1eb71fd98 clk: imx: composite-93: keep root clock on when mcore enabled
| * 73034d130b clk: imx: composite-8m: Enable gate clk with mcore_booted
| * 554c590d22 clk: imx: composite-8m: Less function calls in __imx8m_clk_hw_composite() after error detection
| * c2ee6de22d clk: imx: imx6ul: fix default parent for enet*_ref_sel
| * bd553be1cf clk: imx: clk-audiomix: Correct parent clock for earc_phy and audpll
| * 3ba5a2e91c perf time-utils: Fix 32-bit nsec parsing
| * 022f9328ef perf sched timehist: Fixed timestamp error when unable to confirm event sched_in time
| * fa0720b32a perf stat: Display iostat headers correctly
| * 505ec05002 perf sched timehist: Fix missing free of session in perf_sched__timehist()
| * 88c4b5dd21 perf report: Fix --total-cycles --stdio output error
| * 297871cb51 perf ui/browser/annotate: Use global annotation_options
| * 4c857dcf34 perf annotate: Move some source code related fields from 'struct annotation' to 'struct annotated_source'
| * 4ef032d899 perf annotate: Split branch stack cycles info from 'struct annotation'
| * ba18185bea perf inject: Fix leader sampling inserting additional samples
| * 1490a5dbd5 perf mem: Free the allocated sort string, fixing a leak
| * a634fa8e48 bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error
| * abf7559b4f bpf: Improve check_raw_mode_ok test for MEM_UNINIT-tagged types
| * a2c8dc7e21 bpf: Fix helper writes to read-only maps
| * 81197a9b45 bpf: Fix bpf_strtol and bpf_strtoul helpers for 32bit
| * 257f9e5185 nilfs2: fix potential oob read in nilfs_btree_check_delete()
| * 0f28b3b51f nilfs2: determine empty node blocks as corrupted
| * 21839b6fbc nilfs2: fix potential null-ptr-deref in nilfs_btree_insert()
| * 66f3fc7411 sched/numa: Fix the vma scan starving issue
| * e3a2d3f6c4 sched/numa: Complete scanning of inactive VMAs when there is no alternative
| * cb7846df6b sched/numa: Complete scanning of partial VMAs regardless of PID activity
| * 7f01977665 sched/numa: Move up the access pid reset logic
| * 6654e54ae7 sched/numa: Trace decisions related to skipping VMAs
| * 707e9a6c88 sched/numa: Rename vma_numab_state::access_pids[] => ::pids_active[], ::next_pid_reset => ::pids_active_reset
| * ba4eb7f258 sched/numa: Document vma_numab_state fields
| * faeff8b1ee ext4: check stripe size compatibility on remount as well
| * 2a6579ef5f ext4: avoid OOB when system.data xattr changes underneath the filesystem
| * dd3f90e8c4 ext4: return error on ext4_find_inline_entry
| * 9f70768554 ext4: avoid negative min_clusters in find_group_orlov()
| * fae0793abd ext4: avoid potential buffer_head leak in __ext4_new_inode()
| * 7a349feead ext4: avoid buffer_head leak in ext4_mark_inode_used()
| * 72eef5226f smackfs: Use rcu_assign_pointer() to ensure safe assignment in smk_set_cipso
| * e4006410b0 ext4: clear EXT4_GROUP_INFO_WAS_TRIMMED_BIT even mount with discard
| * cfd257f5e8 kthread: fix task state in kthread worker if being frozen
| * b7d6e724e4 xz: cleanup CRC32 edits from 2018
| * 2288b54b96 bpf: correctly handle malformed BPF_CORE_TYPE_ID_LOCAL relos
| * fc2b89707e samples/bpf: Fix compilation errors with cf-protection option
| * 33ef0b25b0 selftests/bpf: Fix error compiling tc_redirect.c with musl libc
| * 8553067f1c selftests/bpf: Fix compile if backtrace support missing in libc
| * 7824530b80 selftests/bpf: Fix redefinition errors compiling lwt_reroute.c
| * a7d322fd3b selftests/bpf: Fix flaky selftest lwt_redirect/lwt_reroute
| * fb99b106ad selftests/bpf: Fix C++ compile error from missing _Bool type
| * 99c0386959 selftests/bpf: Fix error compiling test_lru_map.c
| * 564d1abf50 selftests/bpf: Fix arg parsing in veristat, test_progs
| * d57f8de839 selftests/bpf: Fix errors compiling cg_storage_multi.h with musl libc
| * 96416a7e48 selftests/bpf: Fix errors compiling decap_sanity.c with musl libc
| * 0bc023e2f6 selftests/bpf: Fix errors compiling lwt_redirect.c with musl libc
| * 397192f814 selftests/bpf: Fix compiling core_reloc.c with musl-libc
| * 227b50fe66 selftests/bpf: Fix compiling tcp_rtt.c with musl-libc
| * fe81b3df3c selftests/bpf: Fix compiling flow_dissector.c with musl-libc
| * 7d8d584045 selftests/bpf: Fix compiling kfree_skb.c with musl-libc
| * 425d4934e4 selftests/bpf: Fix compiling parse_tcp_hdr_opt.c with musl-libc
| * 52f5ed9461 selftests/bpf: Fix include of <sys/fcntl.h>
| * 4730b07ef7 selftests/bpf: Add a cgroup prog bpf_get_ns_current_pid_tgid() test
| * 17536f3b72 selftests/bpf: Refactor out some functions in ns_current_pid_tgid test
| * d6e16c33e0 selftests/bpf: Replace CHECK with ASSERT_* in ns_current_pid_tgid test
| * bedda119ba selftests/bpf: Fix missing BUILD_BUG_ON() declaration
| * 4bff8cc537 selftests/bpf: Fix missing UINT_MAX definitions in benchmarks
| * 2388d18166 selftests/bpf: Fix missing ARRAY_SIZE() definition in bench.c
| * 103c0431c7 selftests/bpf: Drop unneeded error.h includes
| * c8c590f07a selftests/bpf: Implement get_hw_ring_size function to retrieve current and max interface size
| * 7c877bad03 selftests/bpf: Fix error compiling bpf_iter_setsockopt.c with musl libc
| * db5cde7b43 selftests/bpf: Fix compile error from rlim_t in sk_storage_map.c
| * 7572c32f8e selftests/bpf: Use pid_t consistently in test_progs.c
| * b0b99c1226 tools/runqslower: Fix LDFLAGS and add LDLIBS support
| * cd1b7f772f selftests/bpf: Fix wrong binary in Makefile log output
| * 97e4a3ba9d selftests/bpf: Add CFLAGS per source file and runner
| * 5d99839bfe bpf: Temporarily define BPF_NO_PRESEVE_ACCESS_INDEX for GCC
| * 01aa0d2861 bpf: Disable some `attribute ignored' warnings in GCC
| * 5de3bd34dd bpf: Use -Wno-error in certain tests when building with GCC
| * b6529a310d selftests/bpf: Fix error linking uprobe_multi on mips
| * e7d263b294 selftests/bpf: Workaround strict bpf_lsm return value check.
| * 5a4f8de92d sched/fair: Make SCHED_IDLE entity be preempted in strict hierarchy
| * 82478cb8a2 tpm: Clean up TPM space after command failure
| * 9c21cdae4b xen/swiotlb: fix allocated size
| * d1691e9778 xen/swiotlb: add alignment check for dma buffers
| * ac8ec1268e xen: tolerate ACPI NVS memory overlapping with Xen allocated memory
| * 149fbd6aec xen: add capability to remap non-RAM pages to different PFNs
| * f12153eece xen: move max_pfn in xen_memory_setup() out of function scope
| * 242d0c3c40 xen: introduce generic helper checking for memory map conflicts
| * 35a10211de minmax: avoid overly complex min()/max() macro arguments in xen
| * 27f113dc12 ata: libata: Clear DID_TIME_OUT for ATA PT commands with sense data
| * f7b4ba5f78 HID: wacom: Do not warn about dropped packets for first packet
| * 85572bf646 HID: wacom: Support sequence numbers smaller than 16-bit
| * cafeba3c2a xen: use correct end address of kernel for conflict checking
| * 37c40c01cf drivers:drm:exynos_drm_gsc:Fix wrong assignment in gsc_bind()
| * 614773a4e5 drm/msm: fix %s null argument error
| * 476945372b drm/msm/dsi: correct programming sequence for SM8350 / SM8450
| * 52d571a213 ipmi: docs: don't advertise deprecated sysfs entries
| * cbd26fc9ec drm/msm/a5xx: workaround early ring-buffer emptiness check
| * d9bef5ba56 drm/msm/a5xx: fix races in preemption evaluation stage
| * dfd012052b drm/msm/a5xx: properly clear preemption records on resume
| * b941514532 drm/msm/a5xx: disable preemption in submits by default
| * 7e34440a3d drm/msm: Fix incorrect file name output in adreno_request_fw()
| * a02d92e8eb powerpc/vdso: Inconditionally use CFUNC macro
| * efdf2af50b powerpc/8xx: Fix kernel vs user address comparison
| * 6b7a006ab0 powerpc/8xx: Fix initial memory mapping
| * 415a2c2183 drm/mediatek: Use spin_lock_irqsave() for CRTC event lock
| * 5b9b8cd289 drm/mediatek: Fix missing configuration flags in mtk_crtc_ddp_config()
| * c1ba4b8ca7 jfs: fix out-of-bounds in dbNextAG() and diAlloc()
| * baeb8628ab scsi: elx: libefc: Fix potential use after free in efc_nport_vport_del()
| * 9263023a0b drm/vc4: hdmi: Handle error case of pm_runtime_resume_and_get
| * 087b880880 drm/bridge: lontium-lt8912b: Validate mode in drm_bridge_funcs::mode_valid()
| * fa94d60546 drm/radeon/evergreen_cs: fix int overflow errors in cs track offsets
| * 656803ab1a drm/rockchip: dw_hdmi: Fix reading EDID when using a forced mode
| * 9ec05e0b4a drm/rockchip: vop: Allow 4096px width scaling
| * 8e7760ed23 drm/amd/amdgpu: Properly tune the size of struct
| * 53c18f7baf scsi: NCR5380: Check for phase match during PDMA fixup
| * 464fd60a16 scsi: smartpqi: revert propagate-the-multipath-failure-to-SML-quickly
| * de67850b40 drm/radeon: properly handle vbios fake edid sizing
| * 78b9e10b3b drm/amdgpu: properly handle vbios fake edid sizing
| * ddf9ff244d drm/amd/display: Add null check for set_output_gamma in dcn30_set_output_transfer_func
| * fc8b0b8dbd drm/stm: ltdc: check memory returned by devm_kzalloc()
| * 6e513c2e94 drm/stm: Fix an error handling path in stm_drm_platform_probe()
| * 8e6f4aa43b pmdomain: core: Harden inter-column space in debug summary
| * c390a26db3 iommu/arm-smmu-qcom: apply num_context_bank fixes for SDM630 / SDM660
| * 7acaef4f28 iommu/arm-smmu-qcom: Work around SDM845 Adreno SMMU w/ 16K pages
| * 324e1ec463 iommu/arm-smmu-qcom: hide last LPASS SMMU context bank from linux
| * 0f0222d5ab mtd: rawnand: mtk: Fix init error path
| * e502a0db34 mtd: rawnand: mtk: Factorize out the logic cleaning mtk chips
| * ca63b1cbcd mtd: rawnand: mtk: Use for_each_child_of_node_scoped()
| * 9b52ee18f6 rcu/nocb: Fix RT throttling hrtimer armed from offline CPU
| * 4e31e50420 mtd: powernv: Add check devm_kasprintf() returned value
| * e109a01f3d iommu/amd: Do not set the D bit on AMD v2 table entries
| * 9b97d6b08b fbdev: hpfb: Fix an error handling path in hpfb_dio_probe()
| * 508a550eec power: supply: max17042_battery: Fix SOC threshold calc w/ no current sense
| * 05dba1274e power: supply: axp20x_battery: Remove design from min and max voltage
| * cbb2313e76 hwmon: (ntc_thermistor) fix module autoloading
| * 590960a5b3 mtd: slram: insert break after errors in parsing the map
| * 0a27e17475 hwmon: (max16065) Fix alarm attributes
| * fc702f5c3d hwmon: (max16065) Remove use of i2c_match_id()
| * 0c7af15f64 hwmon: (max16065) Fix overflows seen when writing limits
| * f606b9ac4a ASoC: loongson: fix error release
| * 886ea81de4 m68k: Fix kernel_clone_args.flags in m68k_clone()
| * cc08ac5f42 ALSA: hda: cs35l41: fix module autoloading
| * c239cfa322 selftests/ftrace: Add required dependency for kprobe tests
| * 7000e5f31c ASoC: tas2781-i2c: Get the right GPIO line
| * 92b53ece5d ASoC: tas2781-i2c: Drop weird GPIO code
| * ac7976b672 ASoC: tas2781: Use of_property_read_reg()
| * c0f6521806 ASoC: tas2781: remove unused acpi_subysystem_id
| * 06a95f7184 ASoC: rt5682s: Return devm_of_clk_add_hw_provider to transfer the error
| * 17c72808db x86/mm: Use IPIs to synchronize LAM enablement
| * ecd4adebb8 arm64: dts: mediatek: mt8195: Correct clock order for dp_intf*
| * 27106b0a29 clocksource/drivers/qcom: Add missing iounmap() on errors in msm_dt_timer_init()
| * ee7e02e780 reset: k210: fix OF node leak in probe() error path
| * cfbf049d16 reset: berlin: fix OF node leak in probe() error path
| * b2cce50abd ARM: versatile: fix OF node leak in CPUs prepare
| * 01f986dc64 ARM: dts: imx7d-zii-rmu2: fix Ethernet PHY pinctrl property
| * 58bd96e5ec ARM: dts: microchip: sama7g5: Fix RTT clock
| * e91e803da1 spi: bcmbca-hsspi: Fix missing pm_runtime_disable()
| * 7c84cb5a39 arm64: dts: ti: k3-j721e-beagleboneai64: Fix reversed C6x carveout locations
| * ff8444011f arm64: dts: ti: k3-j721e-sk: Fix reversed C6x carveout locations
| * 6d91b3f570 arm64: dts: rockchip: Correct vendor prefix for Hardkernel ODROID-M1
| * c742692fad ARM: dts: microchip: sam9x60: Fix rtc/rtt clocks
| * 514265b1f1 arm64: dts: renesas: r9a07g044: Correct GICD and GICR sizes
| * c2bae2675c arm64: dts: renesas: r9a07g054: Correct GICD and GICR sizes
| * 7d0be36223 arm64: dts: renesas: r9a07g043u: Correct GICD and GICR sizes
| * 1ccd886abf regulator: Return actual error in of_regulator_bulk_get_all()
| * 3bf127bc26 spi: ppc4xx: Avoid returning 0 when failed to parse and map IRQ
| * 6699567b0b firmware: arm_scmi: Fix double free in OPTEE transport
| * bd7fa63736 arm64: dts: mediatek: mt8186: Fix supported-hw mask for GPU OPPs
| * 8d81cd1a04 arm64: dts: exynos: exynos7885-jackpotlte: Correct RAM amount to 4GB
| * 1b08f7b5f5 spi: ppc4xx: handle irq_of_parse_and_map() errors
| * 80f5bfbb80 block: fix potential invalid pointer dereference in blk_add_partition
| * 0d7ddfc892 block: print symbolic error name instead of error code
| * 5740c0fa93 io_uring/io-wq: inherit cpuset of cgroup in io worker
| * 7b3a35584d io_uring/io-wq: do not allow pinning outside of cpuset
| * c3eba0a4e9 block, bfq: fix procress reference leakage for bfqq in merge chain
| * 0780451f03 block, bfq: fix uaf for accessing waker_bfqq after splitting
| * 0c9b52bfee erofs: fix incorrect symlink detection in fast symlink
| * 81b048b948 cachefiles: Fix non-taking of sb_writers around set/removexattr
| * 19f3bec2ac block, bfq: don't break merge chain in bfq_split_bfqq()
| * e50c9a3526 block, bfq: choose the last bfqq from merge chain in bfq_setup_cooperator()
| * 7faed2896d block, bfq: fix possible UAF for bfqq->bic with merge chain
| * 6e73b946a3 nbd: fix race between timeout and normal completion
| * 75a5e5909b ublk: move zone report data out of request pdu
| * 0ceb2f2b5c ipv6: avoid possible NULL deref in rt6_uncached_list_flush_dev()
| * 2b5e904dea net: tipc: avoid possible garbage value
| * a46add42bd net: ipv6: rpl_iptunnel: Fix memory leak in rpl_input
| * 50d062b6cc r8169: disable ALDPS per default for RTL8125
| * 1e8fc4ffa9 net: enetc: Use IRQF_NO_AUTOEN flag in request_irq()
| * 905e83c61b bareudp: Pull inner IP header on xmit.
| * 61761f08e3 bareudp: Pull inner IP header in bareudp_udp_encap_recv().
| * a4a70cba57 Bluetooth: btusb: Fix not handling ZPL/short-transfer
| * d7572187bc can: m_can: m_can_close(): stop clocks after device has been shut down
| * 7fb4f5605c can: m_can: enable NAPI before enabling interrupts
| * c3d941cc73 can: bcm: Clear bo->bcm_proc_read after remove_proc_entry().
| * 80bd490ac0 sock_map: Add a cond_resched() in sock_hash_free()
| * 7eebbdde4b Bluetooth: hci_sync: Ignore errors from HCI_OP_REMOTE_NAME_REQ_CANCEL
| * ea8d90a5b0 Bluetooth: hci_core: Fix sending MGMT_EV_CONNECT_FAILED
| * 84398204c5 wifi: wilc1000: fix potential RCU dereference issue in wilc_parse_join_bss_param
| * 058c9026ad wifi: mac80211: use two-phase skb reclamation in ieee80211_do_stop()
| * cacdc11898 wifi: cfg80211: fix two more possible UBSAN-detected off-by-one errors
| * 2780657f7f wifi: mt76: mt7996: fix uninitialized TLV data
| * 2d9f3e56b9 wifi: mt76: mt7996: ensure 4-byte alignment for beacon commands
| * 15c1d606fa wifi: mt76: mt7915: fix rx filter setting for bfee functionality
| * 9f05824b35 wifi: cfg80211: fix UBSAN noise in cfg80211_wext_siwscan()
| * 0940196c3d wifi: mt76: mt7603: fix mixed declarations and code
| * aa3e0db35a crypto: hisilicon/qm - inject error before stopping queue
| * 8b21a9b1d8 crypto: hisilicon/qm - reset device before enabling it
| * 7803e8cdaa crypto: hisilicon/hpre - mask cluster timeout error
| * 4589bb97e4 pm:cpupower: Add missing powercap_set_enabled() stub function
| * fb2d057539 x86/sgx: Fix deadlock in SGX NUMA node search
| * 6f68e1e9ad wifi: mt76: mt7996: fix EHT beamforming capability check
| * c07082fa24 wifi: mt76: mt7996: fix HE and EHT beamforming capabilities
| * 29516e5db9 wifi: mt76: mt7996: fix wmm set of station interface to 3
| * 7146e5aeff wifi: mt76: mt7996: fix traffic delay when switching back to working channel
| * 50d87e3b70 wifi: mt76: mt7996: use hweight16 to get correct tx antenna
| * 818dd118f4 wifi: mt76: mt7915: fix oops on non-dbdc mt7986
| * 4d3608ae15 cpufreq: ti-cpufreq: Introduce quirks to handle syscon fails appropriately
| * c902e515b6 perf/arm-cmn: Ensure dtm_idx is big enough
| * 5418a61e32 perf/arm-cmn: Fix CCLA register offset
| * a687d9d1fe perf/arm-cmn: Refactor node ID handling. Again.
| * a1b25661a0 perf/arm-cmn: Improve debugfs pretty-printing for large configs
| * f5c4ec8d0e perf/arm-cmn: Rework DTC counters (again)
| * 814b8bc5cc netfilter: nf_tables: remove annotation to access set timeout while holding lock
| * 9431e5eddc netfilter: nf_tables: reject expiration higher than timeout
| * 2a5e648a0c netfilter: nf_tables: reject element expiration with no timeout
| * 08b25d59ff netfilter: nf_tables: elements with timeout below CONFIG_HZ never expire
| * 8ad28208be ACPI: CPPC: Fix MASK_VAL() usage
| * fa3ef5ea3f can: j1939: use correct function name in comment
| * 37c5024e46 kselftest/arm64: Actually test SME vector length changes via sigreturn
| * 666a46a90f drivers/perf: hisi_pcie: Fix TLP headers bandwidth counting
| * 6206a0edb2 drivers/perf: hisi_pcie: Record hardware counts correctly
| * 39dd1f1f48 padata: Honor the caller's alignment in case of chunk_size 0
| * 1661f1352b wifi: iwlwifi: mvm: increase the time between ranging measurements
| * 2c4a7b5014 wifi: iwlwifi: config: label 'gl' devices as discrete
| * 305b7827cf wifi: iwlwifi: remove AX101, AX201 and AX203 support from LNL
| * d54455a3a9 wifi: mac80211: don't use rate mask for offchannel TX either
| * 3b839d4619 drivers/perf: Fix ali_drw_pmu driver interrupt status clearing
| * be158b7e6a kselftest/arm64: signal: fix/refactor SVE vector length enumeration
| * 288cbc505e powercap: intel_rapl: Fix off by one in get_rpi()
| * 9fc60f2bdd ARM: 9410/1: vfp: Use asm volatile in fmrx/fmxr macros
| * c82ea72d96 mount: handle OOM on mnt_warn_timestamp_expiry
| * 032ca566f5 RISC-V: KVM: Fix to allow hpmcounter31 from the guest
| * 3c39f253e2 RISC-V: KVM: Allow legacy PMU access from guest
| * a72a99da7a RISC-V: KVM: Fix sbiret init before forwarding to userspace
| * 07b90bbfe9 wifi: rtw88: remove CPT execution branch never used
| * 32ba316088 arm64: signal: Fix some under-bracketed UAPI macros
| * f0525a641a net: stmmac: dwmac-loongson: Init ref and PTP clocks rate
| * 0a9445aa8e wifi: ath12k: fix invalid AMPDU factor calculation in ath12k_peer_assoc_h_he()
| * aafd6ad1d9 wifi: ath12k: match WMI BSS chan info structure with firmware definition
| * d45fe0115e wifi: ath12k: fix BSS chan info request WMI command
| * dda028a8aa wifi: ath9k: Remove error checks when creating debugfs entries
| * fb1862ce26 wifi: brcmfmac: introducing fwil query functions
| * c3cfcf51b4 wifi: brcmfmac: export firmware interface functions
| * 9349283fc6 ACPI: PMIC: Remove unneeded check in tps68470_pmic_opregion_probe()
| * e55fcc821d crypto: xor - fix template benchmarking
| * 1b8178a2ae wifi: rtw88: always wait for both firmware loading attempts
| * b3e360e00d EDAC/synopsys: Fix error injection on Zynq UltraScale+
| * 23752ababd EDAC/synopsys: Fix ECC status and IRQ control race condition
* | 5611cd3d91 Merge 6.6.53 into android15-6.6-lts
|\|
| * 4ad9fa5c30 Linux 6.6.53
| * 51297ef7ad USB: usbtmc: prevent kernel-usb-infoleak
| * 39d6923889 USB: serial: pl2303: add device id for Macrosilicon MS3020
| * 3a2532d882 can: mcp251xfd: move mcp251xfd_timestamp_start()/stop() into mcp251xfd_chip_start/stop()
| * fa45741f1e can: mcp251xfd: properly indent labels
| * 26b0a1cd9f x86/mm: Switch to new Intel CPU model defines
| * ab51a98de8 nvme-pci: qdepth 1 quirk
| * c4e9800609 gpiolib: cdev: Ignore reconfiguration without direction
| * 53dc61ae5c Revert "wifi: cfg80211: check wiphy mutex is held for wdev mutex"
| * 424bd79517 netfilter: nf_tables: missing iterator type in lookup walk
| * f24d8abc2b netfilter: nft_set_pipapo: walk over current view on netlink dump
| * 94d6fe6b6e netfilter: nft_socket: Fix a NULL vs IS_ERR() bug in nft_socket_cgroup_subtree_level()
| * f07e28e4c6 netfilter: nft_socket: make cgroupsv2 matching work with namespaces
| * ea71c39d46 powercap/intel_rapl: Add support for AMD family 1Ah
| * e615cd84dc drm: Expand max DRM device number to full MINORBITS
| * f6b589e361 accel: Use XArray instead of IDR for minors
| * d2e3d344e2 drm: Use XArray instead of IDR for minors
| * c726dea9d0 ocfs2: strict bound check before memcmp in ocfs2_xattr_find_entry()
| * 1f6e167d67 ocfs2: add bounds checking to ocfs2_xattr_find_entry()
| * 4c21bba38b spi: spidev: Add missing spi_device_id for jg10309-01
| * c20e89c96f block: Fix where bio IO priority gets set
| * 532ba43dce tools: hv: rm .*.cmd when make clean
| * f0759b0973 x86/hyperv: Set X86_FEATURE_TSC_KNOWN_FREQ when Hyper-V provides frequency
| * fabc4ed200 smb: client: fix hang in wait_for_response() for negproto
| * e79896417c spi: bcm63xx: Enable module autoloading
| * 745fe9f19d drm: komeda: Fix an issue related to normalized zpos
| * d7c126497d ALSA: hda: add HDMI codec ID for Intel PTL
| * 16fb61afff ASoC: amd: yc: Add a quirk for MSI Bravo 17 (D7VEK)
| * a9affc6dd8 spi: spidev: Add an entry for elgin,jg10309-01
| * 5a8f8d49bc ASoC: fix module autoloading
| * b3cc98bd86 ASoC: tda7419: fix module autoloading
| * 1803f06c86 ASoC: google: fix module autoloading
| * 7675ab5900 ASoC: intel: fix module autoloading
| * ec39e3104a ASoC: Intel: soc-acpi-cht: Make Lenovo Yoga Tab 3 X90F DMI match less strict
| * 740253ebb5 can: mcp251xfd: mcp251xfd_ring_init(): check TX-coalescing configuration
| * 021cd8f0e4 wifi: iwlwifi: clear trans->state earlier upon error
| * 9902dacd5b wifi: mac80211: free skb on error path in ieee80211_beacon_get_ap()
| * 4d0a900ec4 wifi: iwlwifi: mvm: don't wait for tx queues if firmware is dead
| * 2c61b561ba wifi: iwlwifi: mvm: pause TCM when the firmware is stopped
| * 8587a0ed5f wifi: iwlwifi: mvm: fix iwl_mvm_max_scan_ie_fw_cmd_room()
| * 0d07f12e1f wifi: iwlwifi: mvm: fix iwl_mvm_scan_fits() calculation
| * dfa94a93f7 wifi: iwlwifi: lower message level for FW buffer destination
| * 8a834f251f LoongArch: Define ARCH_IRQ_INIT_FLAGS as IRQ_NOPROBE
| * d44cfa992b net: ftgmac100: Ensure tx descriptor updates are visible
| * 001eaeaac7 platform/x86: x86-android-tablets: Make Lenovo Yoga Tab 3 X90F DMI match less strict
| * 1bab72a2b9 microblaze: don't treat zero reserved memory regions as error
| * 76f74a1c3d hwmon: (asus-ec-sensors) remove VRM temp X570-E GAMING
| * af08f45061 pinctrl: at91: make it work with current gpiolib
| * 013180bf23 scsi: lpfc: Fix overflow build issue
| * 49a9fe95eb ALSA: hda/realtek - FIxed ALC285 headphone no sound
| * 4a31d48c09 ALSA: hda/realtek - Fixed ALC256 headphone no sound
| * 50dcf4b7b7 ASoC: allow module autoloading for table board_ids
| * b7420317a9 ASoC: allow module autoloading for table db1200_pids
| * 0627ba9434 ASoC: mediatek: mt8188: Mark AFE_DAC_CON0 register as volatile
| * aef2673741 ASoC: SOF: mediatek: Add missing board compatible
* | 9fc5c41d7a Merge 6.6.52 into android15-6.6-lts
|\|
| * 561bbd55f9 Linux 6.6.52
| * bd9c3c2d7e riscv: dts: starfive: add assigned-clock* to limit frquency
| * e43364f578 ASoC: meson: axg-card: fix 'use-after-free'
| * 2a01f3b7b1 pinctrl: meteorlake: Add Arrow Lake-H/U ACPI ID
| * b9d510e085 cifs: Fix signature miscalculation
| * 6ec7cbc7f5 ASoC: codecs: avoid possible garbage value in peb2466_reg_read()
| * 86238603c8 drm/i915/guc: prevent a possible int overflow in wq offsets
| * f9e08c2017 spi: geni-qcom: Fix incorrect free_irq() sequence
| * 64cdc5d114 spi: geni-qcom: Undo runtime PM changes at driver exit time
| * ff65ae25d3 drm/amd/amdgpu: apply command submission parser for JPEG v1
| * 5426846839 drm/amdgpu/atomfirmware: Silence UBSAN warning
| * def80cdb26 drm/nouveau/fb: restore init() for ramgp102
| * eb7fc8b65c dma-buf: heaps: Fix off-by-one in CMA heap fault handler
| * 8e1ffb2579 drm/syncobj: Fix syncobj leak in drm_syncobj_eventfd_ioctl
| * 28425a10a4 soundwire: stream: Revert "soundwire: stream: fix programming slave ports for non-continous port maps"
| * af9ca9ca3e spi: nxp-fspi: fix the KASAN report out-of-bounds bug
| * a8632ef4fc tracing/osnoise: Fix build when timerlat is not enabled
| * 34fcac2621 net: dpaa: Pad packets to ETH_ZLEN
| * fc8c0cec1b net: dsa: felix: ignore pending status of TAS module when it's disabled
| * 83e6fb5904 netfilter: nft_socket: fix sk refcount leaks
| * 033a71efab selftests: net: csum: Fix checksums for packets with non-zero padding
| * 38859fb5bd net: ftgmac100: Enable TX interrupt to avoid TX timeout
| * 5bfbf2c18c octeontx2-af: Modify SMQ flush sequence to drop packets
| * 7ae890ee19 fou: fix initialization of grc
| * 65feee671e net/mlx5: Fix bridge mode operations when there are no VFs
| * 4bb9745cc3 net/mlx5: Verify support for scheduling element and TSAR type
| * 9f806d0959 net/mlx5: Correct TASR typo into TSAR
| * fa2e98068d net/mlx5: Add missing masks and QoS bit masks for scheduling elements
| * f015f63cc9 net/mlx5: Explicitly set scheduling element and TSAR type
| * f7e7dbdfc0 net/mlx5e: Add missing link mode to ptys2ext_ethtool_map
| * 4ce59074d5 IB/mlx5: Rename 400G_8X speed to comply to naming convention
| * 93fd5e028b net/mlx5e: Add missing link modes to ptys2ethtool_map
| * 02518dc443 net/mlx5: Update the list of the PCI supported devices
| * e8db32a902 igb: Always call igb_xdp_ring_update_tail() under Tx lock
| * cbaed60c69 ice: fix VSI lists confusion when adding VLANs
| * 01a786ada1 ice: fix accounting for filters shared by multiple VSIs
| * d21559e203 ice: Fix lldp packets dropping after changing the number of channels
| * 1bc085e997 hwmon: (pmbus) Conditionally clear individual status bits for pmbus rev >= 1.2
| * 14f6a11ea2 selftests/bpf: Support SOCK_STREAM in unix_inet_redir_to_connected()
| * 8295194a50 cxl/core: Fix incorrect vendor debug UUID define
| * cb735cf79a eeprom: digsy_mtc: Fix 93xx46 driver probe failure
| * 7853c146f8 drm/amd/display: Fix FEC_READY write on DP LT
| * 27bbf0b1ca drm/amd/display: Disable error correction if it's not supported
| * d72432755b arm64: dts: rockchip: fix PMIC interrupt pin in pinctrl for ROCK Pi E
| * b1e1daf012 net: xilinx: axienet: Fix race in axienet_stop
| * a95a24fcae mm: avoid leaving partial pfn mappings around in error case
| * 2ae1beb3ab x86/hyperv: fix kexec crash due to VP assist page corruption
| * 9b27991f3f dm-integrity: fix a race condition when accessing recalc_sector
| * 4ec0d8dbd7 net: tighten bad gso csum offset check in virtio_net_hdr
| * 1705209b3e minmax: reduce min/max macro expansion in atomisp driver
| * 3844bc360e arm64: dts: rockchip: override BIOS_DISABLE signal via GPIO hog on RK3399 Puma
| * d52643ced1 arm64: dts: rockchip: fix eMMC/SPI corruption when audio has been used on RK3399 Puma
| * 7e2e638c59 selftests: mptcp: join: restrict fullmesh endp on 1st sf
| * 6452b16254 mptcp: pm: Fix uaf in __timer_delete_sync
| * c54fc405a0 platform/x86: panasonic-laptop: Allocate 1 entry extra in the sinf array
| * 6821a82616 platform/x86: panasonic-laptop: Fix SINF array out of bounds accesses
| * d07216aa30 NFS: Avoid unnecessary rescanning of the per-server delegation list
| * d8a7055ffd NFSv4: Fix clearing of layout segments in layoutreturn
| * 75e6572ccb smb/server: fix return value of smb2_open()
| * 91043a573c Input: i8042 - add Fujitsu Lifebook E756 to i8042 quirk table
| * 57ac3b43fb drm/msm/adreno: Fix error return if missing firmware-name
| * c5331c6342 platform/surface: aggregator_registry: Add support for Surface Laptop Go 3
| * 64f8ed257c platform/surface: aggregator_registry: Add Support for Surface Pro 10
| * 86a1aaee7f scripts: kconfig: merge_config: config files: add a trailing newline
| * 1d5c7d0a49 HID: multitouch: Add support for GT7868Q
| * 11eb4a8228 Input: synaptics - enable SMBus for HP Elitebook 840 G2
| * 3acb2392df Input: ads7846 - ratelimit the spi_sync error message
| * 5ee7efa629 btrfs: update target inode's ctime on unlink
| * ab8f0c4986 net: hns3: use correct release function during uninitialization
| * 4bfee9346d wifi: mt76: mt7921: fix NULL pointer access in mt7921_ipv6_addr_change
| * 4a9a1edd9c powerpc/mm: Fix boot warning with hugepages and CONFIG_DEBUG_VIRTUAL
| * af252750bf net: phy: vitesse: repair vsc73xx autonegotiation
| * 31b9fc3d0c drm: panel-orientation-quirks: Add quirk for Ayn Loki Max
| * 7d42d19973 drm: panel-orientation-quirks: Add quirk for Ayn Loki Zero
| * 9569e1fd06 net: ethernet: use ip_hdrlen() instead of bit shift
| * 28123a54f8 usbnet: ipheth: fix carrier detection in modes 1 and 4
| * 4d1cfa3afb usbnet: ipheth: do not stop RX on failing RX callback
| * 9c8c230e2e usbnet: ipheth: drop RX URBs with no payload
| * c2fb33a7fe usbnet: ipheth: remove extraneous rx URB length check
| * 78bce66914 ksmbd: override fsids for smb2_query_info()
| * 5a199eedfd ksmbd: override fsids for share path check
| * 2278629c3e nvmem: u-boot-env: error if NVMEM device is too small
| * 368fa77b79 nvmem: u-boot-env: improve coding style
| * 2eea394c31 nvmem: u-boot-env: use nvmem device helpers
| * ae91c9c7b6 nvmem: u-boot-env: use nvmem_add_one_cell() nvmem subsystem helper
| * 820b1b981a nvmem: core: add nvmem_dev_size() helper
| * f7dc14df1b iio: adc: ad7124: fix DT configuration parsing
| * fbed740058 iio: adc: ad7124: Switch from of specific to fwnode based property handling
| * bfc8dab8c7 device property: Introduce device_for_each_child_node_scoped()
| * fce8373d31 device property: Add cleanup.h based fwnode_handle_put() scope based cleanup.
* | eb7cf642ed Merge branch 'android15-6.6' into android15-6.6-lts
* | 886869d11e Merge branch 'android15-6.6' into android15-6.6-lts
* | e7a4f5e3ae Revert "perf/aux: Fix AUX buffer serialization"
* | d16ed636ab Revert "clocksource/drivers/timer-of: Remove percpu irq related code"
* | 18bea82acf Merge 6.6.51 into android15-6.6-lts
|/
* 6d1dc55b5b Linux 6.6.51
* 611e428111 Bluetooth: hci_sync: Fix UAF on hci_abort_conn_sync
* 4d6cf010d8 Bluetooth: hci_sync: Fix UAF on create_le_conn_complete
* 78155f30be Bluetooth: hci_sync: Fix UAF in hci_acl_create_conn_sync
* 50b6744c12 spi: spi-fsl-lpspi: Fix off-by-one in prescale max
* 7b5595f33c btrfs: fix race between direct IO write and fsync when using same fd
* 8eeda5fb59 x86/mm: Fix PTI for i386 some more
* a2977c0ca3 membarrier: riscv: Add full memory barrier in switch_mm()
* 136a29d811 ublk_drv: fix NULL pointer dereference in ublk_ctrl_start_recovery()
* bd29d84520 riscv: Do not restrict memory size because of linear mapping on nommu
* 8289dc916e riscv: Fix toolchain vector detection
* b27ea9c96e smb: client: fix double put of @cfile in smb2_rename_path()
* 52b688c808 gpio: modepin: Enable module autoloading
* 9ceae54e65 gpio: rockchip: fix OF node leak in probe()
* 60d54a45db drm/i915/fence: Mark debug_fence_free() with __maybe_unused
* a65ebba873 drm/i915/fence: Mark debug_fence_init_onstack() with __maybe_unused
* 7c391eaf2c clk: qcom: gcc-sm8550: Don't park the USB RCG at registration time
* a5e871d26b clk: qcom: gcc-sm8550: Don't use parking clk_ops for QUPs
* b9bb963436 ASoC: sunxi: sun4i-i2s: fix LRCLK polarity in i2s mode
* f39bde3f78 ASoc: SOF: topology: Clear SOF link platform name upon unload
* 05500a48d8 nvme-pci: allocate tagset on reset if necessary
* 489f2913a6 nvmet-tcp: fix kernel crash if commands allocation fails
* 585c598082 ASoC: tegra: Fix CBB error during probe()
* af4d5630d9 powerpc/vdso: Don't discard rela sections
* 547acc20e5 powerpc/64e: Define mmu_pte_psize static
* 8ea58996f5 powerpc/64e: split out nohash Book3E 64-bit code
* 8ebe3bb368 powerpc/64e: remove unused IBM HTW code
* eaccebe663 clk: qcom: ipq9574: Update the alpha PLL type for GPLLs
* 37b65ea6c7 crypto: starfive - Fix nent assignment in rsa dec
* 02b3f88609 crypto: starfive - Align rsa input data to 32-bit
* 872f86e175 ata: libata-scsi: Check ATA_QCFLAG_RTF_FILLED before using result_tf
* c8d4acb325 ata: libata-scsi: Remove redundant sense_buffer memsets
* 302ba299c3 drm/amdgpu: handle gfx12 in amdgpu_display_verify_sizes
* 5f2a2bf253 drm/amd: Add gfx12 swizzle mode defs
* 5ea24ddc26 can: mcp251xfd: rx: add workaround for erratum DS80000789E 6 of mcp2518fd
* 6cdc3fc4fb can: mcp251xfd: clarify the meaning of timestamp
* bf501ab4cb can: mcp251xfd: rx: prepare to workaround broken RX FIFO head index erratum
* 2370061f07 can: mcp251xfd: mcp251xfd_handle_rxif_ring_uinc(): factor out in separate function
* 62ca6d3a90 arm64: acpi: Harden get_cpu_for_acpi_id() against missing CPU entry
* acf9ef8d1b arm64: acpi: Move get_cpu_for_acpi_id() to a header
* 47c310fbaa ACPI: processor: Fix memory leaks in error paths of processor_add()
* 6bf77014db ACPI: processor: Return an error if acpi_processor_get_info() fails in processor_add()
* 241bce1c75 workqueue: Improve scalability of workqueue watchdog touch
* 5ff0a44141 workqueue: wq_watchdog_touch is always called with valid CPU
* 0eceaa9d05 Revert "mm: skip CMA pages when they are not available"
* 9a9974713d mm/vmscan: use folio_migratetype() instead of get_pageblock_migratetype()
* c4b69bee3f perf/aux: Fix AUX buffer serialization
* 9faed52b98 uprobes: Use kzalloc to allocate xol area
* 7eeb7189c4 clocksource/drivers/timer-of: Remove percpu irq related code
* 444c3927a0 clocksource/drivers/imx-tpm: Fix next event not taking effect sometime
* c4f27b17d3 clocksource/drivers/imx-tpm: Fix return -ETIME when delta exceeds INT_MAX
* 39e7e59341 VMCI: Fix use-after-free when removing resource in vmci_resource_remove()
* 6ed45748c1 Drivers: hv: vmbus: Fix rescind handling in uio_hv_generic
* de6946be9c uio_hv_generic: Fix kernel NULL pointer dereference in hv_uio_rescind
* 3d1baf322a nvmem: Fix return type of devm_nvmem_device_get() in kerneldoc
* 1f33d9f1d9 binder: fix UAF caused by offsets overwrite
* f77dc8a758 misc: fastrpc: Fix double free of 'buf' in error path
* 7d301dd272 usb: dwc3: Avoid waking up gadget during startxfer
* f224f37297 usb: cdns2: Fix controller reset issue
* 6ef746b0b6 usb: dwc3: core: update LC timer as per USB Spec V3.2
* 314125cbae iio: adc: ad7124: fix chip ID mismatch
* 66d0d59afe iio: adc: ad7606: remove frstdata check for serial mode
* fb5d58f238 iio: adc: ad7124: fix config comparison
* ecc8e1bcac iio: fix scale application in iio_convert_raw_to_processed_unlocked
* cb0f3f0c10 iio: buffer-dmaengine: fix releasing dma channel on error
* dc12e49f97 staging: iio: frequency: ad9834: Validate frequency parameter value
* 5c007a9804 intel: legacy: Partial revert of field get conversion
* 4fe707a297 tcp: process the 3rd ACK with sk_socket for TFO/MPTCP
* 3b843046db cpufreq: amd-pstate: fix the highest frequency issue which limits performance
* 1ec40a175a cpufreq: amd-pstate: Enable amd-pstate preferred core support
* 0b983c08ca ACPI: CPPC: Add helper to get the highest performance value
* e0316069fa riscv: Use accessors to page table entries instead of direct dereference
* 59c9160a7e riscv: mm: Only compile pgtable.c if MMU
* 1a8b2391e0 mm: Introduce pudp/p4dp/pgdp_get() functions
* 193b1fc1cb riscv: Use WRITE_ONCE() when setting page table entries
* 6c4a878e1c NFSv4: Add missing rescheduling points in nfs_client_return_marked_delegations
* 07f384c5be smb/server: fix potential null-ptr-deref of lease_ctx_info in smb2_open()
* b777131d03 ata: pata_macio: Use WARN instead of BUG
* ff62110ec5 spi: spi-fsl-lpspi: limit PRESCALE bit in TCR register
* 32ee052015 MIPS: cevt-r4k: Don't call get_c0_compare_int if timer irq is installed
* d942e85532 lib/generic-radix-tree.c: Fix rare race in __genradix_ptr_alloc()
* 7ead730af1 of/irq: Prevent device address out-of-bounds read in interrupt map walk
* c3af7e460a Squashfs: sanity check symbolic link size
* 6604d76253 usbnet: ipheth: race between ipheth_close and error handling
* a4858b00a1 Input: uinput - reject requests with unreasonable number of slots
* 60dc4ee042 HID: amd_sfh: free driver_data after destroying hid device
* 30e9ce7cd5 HID: cougar: fix slab-out-of-bounds Read in cougar_report_fixup
* fc9fabeee1 s390/vmlinux.lds.S: Move ro_after_init section behind rodata section
* f1eb69aa85 btrfs: initialize location to fix -Wmaybe-uninitialized in btrfs_lookup_dentry()
* 16ccaf581d spi: hisi-kunpeng: Add verification for the max_frequency provided by the firmware
* d43fde5ebf kselftests: dmabuf-heaps: Ensure the driver name is null-terminated
* 5a022269ab i3c: mipi-i3c-hci: Error out instead on BUG_ON() in IBI DMA setup
* 1f489656d5 i3c: master: svc: resend target address when get NACK
* c03185f4a2 vfs: Fix potential circular locking through setxattr() and removexattr()
* e42ea96d6d regmap: maple: work around gcc-14.1 false-positive warning
* fd8e141223 LoongArch: Use correct API to map cmdline in relocate_kernel()
* 938acd8e3a net: dpaa: avoid on-stack arrays of NR_CPUS elements
* 013dae4735 Bluetooth: btnxpuart: Fix Null pointer dereference in btnxpuart_flush()
* 9fd2973837 tcp: Don't drop SYN+ACK for simultaneous connect().
* 78c6e39fef PCI: Add missing bridge lock to pci_bus_lock()
* ce2e63804a riscv: set trap vector earlier
* 124451bbc2 cxl/region: Verify target positions using the ordered target list
* 41a0f85e26 btrfs: replace BUG_ON() with error handling at update_ref_for_cow()
* 7d1df13bf0 btrfs: clean up our handling of refs == 0 in snapshot delete
* e7469c65b3 btrfs: replace BUG_ON with ASSERT in walk_down_proc()
* 951b696db1 fs/ntfs3: Check more cases when directory is corrupted
* 6b1b0a86d9 smp: Add missing destroy_work_on_stack() call in smp_call_on_cpu()
* 6922ab2932 drm/amdgpu: reject gang submit on reserved VMIDs
* c2618dcb26 wifi: mwifiex: Do not return unused priv in mwifiex_get_priv_by_id()
* abc8b81b6f dma-mapping: benchmark: Don't starve others when doing the test
* e16c4c2451 jbd2: avoid mount failed when commit block is partial submitted
* 3236afd1a2 ext4: fix possible tid_t sequence overflows
* 077c7e5fee drm/amdgpu: Set no_hw_access when VF request full GPU fails
* 030958c2d0 libbpf: Add NULL checks to bpf_object__{prev_map,next_map}
* 4b83b207f0 ASoc: TAS2781: replace beXX_to_cpup with get_unaligned_beXX for potentially broken alignment
* 8fecb75bff hwmon: (w83627ehf) Fix underflows seen when writing limit attributes
* 2f69554408 hwmon: (nct6775-core) Fix underflows seen when writing limit attributes
* 46e4fd338d hwmon: (lm95234) Fix underflows seen when writing limit attributes
* 6891b11a0c hwmon: (adc128d818) Fix underflows seen when writing limit attributes
* 3a986d1344 crypto: qat - fix unintentional re-enabling of error interrupts
* 7b1d779647 scsi: pm80xx: Set phy->enable_completion only when we wait for it
* 2f49e05d6b scsi: ufs: core: Remove SCSI host only if added
* c83d464b82 wifi: rtw88: usb: schedule rx work after everything is set up
* c5b30148ef virtio_ring: fix KMSAN error for premapped mode
* b82d4d5c73 pci/hotplug/pnv_php: Fix hotplug driver crash on Powernv
* 72377cee3f devres: Initialize an uninitialized struct member
* ec5b47a370 um: line: always fill *error_out in setup_one_line()
* 84a6b76b28 cgroup: Protect css->cgroup write under css_set_lock
* 7cfa7abb24 iommu/vt-d: Handle volatile descriptor status read
* 8b32674283 dm init: Handle minors larger than 255
* 67786b291e ASoC: topology: Properly initialize soc_enum values
* 8bdbc44c6d phy: zynqmp: Take the phy mutex in xlate
* 441e6f5829 firmware: cs_dsp: Don't allow writes to read-only controls
* e997b357b1 xen: privcmd: Fix possible access to a freed kirqfd instance
* 2b110cce19 selftests: net: enable bind tests
* 97d6274615 net: dsa: vsc73xx: fix possible subblocks range of CAPT block
* c6c535a444 net: bridge: br_fdb_external_learn_add(): always set EXT_LEARN
* 565eb51b3d r8152: fix the firmware doesn't work
* 1df42be305 fou: Fix null-ptr-deref in GRO.
* 40531583c5 bareudp: Fix device stats updates.
* f8d6acb19f bpf, net: Fix a potential race in do_sock_getsockopt()
* 2174a3c368 net/socket: Break down __sys_getsockopt
* e88c16a4f0 net/socket: Break down __sys_setsockopt
* 09fba0162b bpf: Add sockptr support for setsockopt
* 4a746fb253 bpf: Add sockptr support for getsockopt
* 07200e313c usbnet: modern method to get random MAC
* 81e5622c05 ice: do not bring the VSI up, if it was down before the XDP setup
* 2f057db2fb ice: protect XDP configuration with a mutex
* 26928c8f00 net: phy: Fix missing of_node_put() for leds
* 217539e994 hwmon: (hp-wmi-sensors) Check if WMI event data exists
* ed60aab606 igc: Unlock on error in igc_io_resume()
* 249c88e7fb Bluetooth: MGMT: Fix not generating command complete for MGMT_OP_DISCONNECT
* d56412ee7c Bluetooth: hci_sync: Introduce hci_cmd_sync_run/hci_cmd_sync_run_once
* d948e1ffa1 Bluetooth: hci_sync: Attempt to dequeue connection attempt
* 1499f79995 Bluetooth: hci_sync: Add helper functions to manipulate cmd_sync queue
* 98f66ea456 Bluetooth: hci_conn: Fix UAF Write in __hci_acl_create_connection_sync
* e78bd85af2 Bluetooth: Remove pending ACL connection attempts
* c57edb5482 Bluetooth: hci_conn: Only do ACL connections sequentially
* 9cd7289bcc Bluetooth: hci_event: Use HCI error defines instead of magic values
* a22cbf1e08 Bluetooth: qca: If memdump doesn't work, re-enable IBS
* 503901d3c9 can: kvaser_pciefd: Use a single write when releasing RX buffers
* 6587b387cd can: kvaser_pciefd: Move reset of DMA RX buffers to the end of the ISR
* 00e4c69422 can: kvaser_pciefd: Rename board_irq to pci_irq
* 4240850736 can: kvaser_pciefd: Remove unnecessary comment
* c1fb622679 can: kvaser_pciefd: Skip redundant NULL pointer check in ISR
* c5e236744d regulator: core: Stub devm_regulator_bulk_get_const() if !CONFIG_REGULATOR
* dc2694e474 platform/x86: dell-smbios: Fix error path in dell_smbios_init()
* efe8effe13 ice: Add netif_device_attach/detach into PF reset flow
* 4dde043705 igb: Fix not clearing TimeSync interrupts for 82580
* 0a9423f99d cifs: Fix FALLOC_FL_ZERO_RANGE to preflush buffered part of target region
* b4b2115d1f rust: kbuild: fix export of bss symbols
* 4de4e53bbd rust: Use awk instead of recent xargs
* 0e52907493 can: mcp251xfd: fix ring configuration when switching from CAN-CC to CAN-FD mode
* db5aca78e2 can: m_can: Release irq on error in m_can_open
* 4377b79323 can: bcm: Remove proc entry when dev is unregistered.
* 9a41def4c4 drm/amdgpu: check for LINEAR_ALIGNED correctly in check_tiling_flags_gfx6
* dfafee0a7b drm/amd/display: Check denominator pbn_div before used
* dd48992a8a pcmcia: Use resource_size function on resource object
* b4987d0236 media: qcom: camss: Add check for v4l2_fwnode_endpoint_parse
* 6970213c7e Input: ili210x - use kvmalloc() to allocate buffer for firmware update
* 576d0fb6f8 PCI: keystone: Add workaround for Errata #i2037 (AM65x SR 1.0)
* 7b645e6870 ice: Check all ice_vsi_rebuild() errors in function
* 4ef01846c6 vfio/spapr: Always clear TCEs before unsetting the window
* 4676bacc6e media: vivid: don't set HDMI TX controls if there are no HDMI outputs
* 2521ba3cfa drm/amdgpu: clear RB_OVERFLOW bit when enabling interrupts
* 8bc7b3ce33 drm/amdgpu: Fix smatch static checker warning
* 1bd1fe1109 drm/amd/display: Check HDCP returned status
* 874e3bb302 drm/amd/display: Run DC_LOG_DC after checking link->link_enc
* b2a50ffdd1 usb: gadget: aspeed_udc: validate endpoint index for ast udc
* 4292441b87 usb: uas: set host status byte on data completion error
* d22d72e2bf wifi: brcmsmac: advertise MFP_CAPABLE to enable WPA3
* 56b7104b82 leds: spi-byte: Call of_node_put() on error path
* 6ae2e315a3 media: vivid: fix wrong sizeimage value for mplane
* 1741021fc1 riscv: kprobes: Use patch_text_nosync() for insn slots
* d670934d4f fs/ntfs3: One more reason to mark inode bad
* a563307619 udf: Avoid excessive partition lengths
* 415f3634d5 wifi: iwlwifi: mvm: use IWL_FW_CHECK for link ID check
* 54921e9a7a netfilter: nf_conncount: fix wrong variable type
* 75758ca26c iommu: sun50i: clear bypass register
* 4ebd15ab4b x86/kmsan: Fix hook for unaligned accesses
* 9c2450cf5d af_unix: Remove put_pid()/put_cred() in copy_peercred().
* 29ac5a9b6e irqchip/armada-370-xp: Do not allow mapping IRQ 0 and 1
* 9a173212a3 accel/habanalabs/gaudi2: unsecure edma max outstanding register
* 53f17409ab ELF: fix kernel.randomize_va_space double read
* 3c9e7909df bpf, verifier: Correct tail_call_reachable for bpf prog
* b181e96e80 smack: unix sockets: fix accept()ed socket label
* 838c2cfdb6 wifi: ath12k: fix firmware crash due to invalid peer nss
* b366b1e1dd wifi: ath12k: fix uninitialize symbol error on ath12k_peer_assoc_h_he()
* fd05943b05 ALSA: hda: Add input value sanity checks to HDMI channel map controls
* 4a67c7c038 ALSA: control: Apply sanity check of input values for user elements
* 337266ada8 drm/i915: Do not attempt to load the GSC multiple times
* 0a1a961bde nilfs2: fix state management in error path of log writing function
* 8c6e43b3d5 nilfs2: protect references to superblock parameters exposed in sysfs
* 9d8c3a585d nilfs2: fix missing cleanup on rollforward recovery error
* d4a9039a7b sched: sch_cake: fix bulk flow accounting logic for host fairness
* 18a5a16940 ila: call nf_unregister_net_hooks() sooner
* c8219a27fa tcp_bpf: fix return value of tcp_bpf_sendmsg()
* 94479011f4 Revert "drm/amdgpu: align pp_power_profile_mode with kernel docs"
* 73d20d08d3 x86/apic: Make x2apic_disable() work correctly
* 55c834bc9f x86/fpu: Avoid writing LBR bit to IA32_XSS unless supported
* ec36815215 net: mctp-serial: Fix missing escapes on transmit
* 9e0bff4900 net: mana: Fix error handling in mana_create_txq/rxq's NAPI cleanup
* 05e08297c3 eventfs: Use list_del_rcu() for SRCU protected list variable
* e0d724932a fscache: delete fscache_cookie_lru_timer when fscache exits to avoid UAF
* 3c6b4bcf37 userfaultfd: fix checks for huge PMDs
* 4a594acc12 userfaultfd: don't BUG_ON() if khugepaged yanks our page table
* b4fdabffae tracing/timerlat: Add interface_lock around clearing of kthread in stop_kthread()
* 993ecb4ec1 tracing: Avoid possible softlockup in tracing_iter_reset()
* 8c72f0b2c4 tracing/timerlat: Only clear timer if a kthread exists
* 7a5f01828e tracing/osnoise: Use a cpumask to know what threads are kthreads
* d034bff62f spi: rockchip: Resolve unbalanced runtime PM / system PM handling
* 1b2770e27d mm: vmalloc: ensure vmap_block is initialised before adding to queue
* c318a4bb36 kexec_file: fix elfcorehdr digest exclusion when CONFIG_CRASH_HOTPLUG=y
* 8fecde9c3f can: mcp251x: fix deadlock if an interrupt occurs during mcp251x_open
* f58f233289 clk: qcom: clk-alpha-pll: Fix the trion pll postdiv set rate API
* 229493828d clk: qcom: clk-alpha-pll: Fix the pll post div mask
* 72f4fc5fb2 clk: starfive: jh7110-sys: Add notifier for PLL0 clock
* f36df5cc86 fuse: fix memory leak in fuse_create_open
* bfd55cd429 fuse: use unsigned type for getxattr/listxattr size truncation
* ad6451ab31 fuse: update stats for pages in dropped aux writeback list
* a7fa220ebb mmc: cqhci: Fix checking of CQHCI_HALT state
* 4c6520627b mmc: sdhci-of-aspeed: fix module autoloading
* 5b4bf39488 mmc: dw_mmc: Fix IDMAC operation with pages bigger than 4K
* 115a755bb3 mmc: core: apply SD quirks earlier during probe
* 84996e92a1 Bluetooth: MGMT: Ignore keys being loaded with invalid type
* c4252955e1 Revert "Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE"
* f9275893b0 rust: macros: provide correct provenance when constructing THIS_MODULE
* d6344cc86f rust: types: Make Opaque::get const
* 77ee2eaee4 nvme-pci: Add sleep quirk for Samsung 990 Evo
* 85f03ca98e rtmutex: Drop rt_mutex::wait_lock before scheduling
* 0b46b4ac92 x86/kaslr: Expose and use the end of the physical memory address space
* 2f4d7b7026 irqchip/gic-v2m: Fix refcount leak in gicv2m_of_init()
* 0eaf812aa1 perf/x86/intel: Limit the period on Haswell
* ef00818c50 x86/tdx: Fix data leak in mmio_read()
* c0fbc9593b ata: libata: Fix memory leak for error path in ata_host_alloc()
* f75881f54c ksmbd: Unlock on in ksmbd_tcp_set_interfaces()
* 41bc256da7 ksmbd: unset the binding mark of a reused connection
* 5a72d1edb0 smb: client: fix double put of @cfile in smb2_set_path_size()
* d84ab6661e powerpc/qspinlock: Fix deadlock in MCS queue
* c1f23443da ALSA: hda/realtek: Support mute LED on HP Laptop 14-dq2xxx
* 421c2701a9 ALSA: hda/realtek: add patch for internal mic in Lenovo V145
* 638e61b002 ALSA: hda/conexant: Add pincfg quirk to enable top speakers on Sirius devices
* 6c7c519c4d KVM: SVM: Don't advertise Bus Lock Detect to guest if SVM support is missing
* c98bb4f15e KVM: SVM: fix emulation of msr reads/writes of MSR_FS_BASE and MSR_GS_BASE
* 939375737b KVM: x86: Acquire kvm->srcu when handling KVM_SET_VCPU_EVENTS
* 5d13afd021 ASoC: dapm: Fix UAF for snd_soc_pcm_runtime object
* b0804c286c net: microchip: vcap: Fix use-after-free error in kunit test
* dde33a9d0b sch/netem: fix use after free in netem_dequeue

Change-Id: I4c89274883207e9790426a87db84fb4248fa0b2c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2024-10-28 18:46:26 +00:00

6891 lines
191 KiB
C

// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/workqueue.c - generic async execution with shared worker pool
*
* Copyright (C) 2002 Ingo Molnar
*
* Derived from the taskqueue/keventd code by:
* David Woodhouse <dwmw2@infradead.org>
* Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter.
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*
* This is the generic async execution mechanism. Work items as are
* executed in process context. The worker pool is shared and
* automatically managed. There are two worker pools for each CPU (one for
* normal work items and the other for high priority ones) and some extra
* pools for workqueues which are not bound to any specific CPU - the
* number of these backing pools is dynamic.
*
* Please read Documentation/core-api/workqueue.rst for details.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>
#include "workqueue_internal.h"
#include <trace/hooks/dtask.h>
#include <trace/hooks/wqlockup.h>
/* events/workqueue.h uses default TRACE_INCLUDE_PATH */
#undef TRACE_INCLUDE_PATH
enum {
        /*
         * worker_pool flags
         *
         * A bound pool is either associated or disassociated with its CPU.
         * While associated (!DISASSOCIATED), all workers are bound to the
         * CPU and none has %WORKER_UNBOUND set and concurrency management
         * is in effect.
         *
         * While DISASSOCIATED, the cpu may be offline and all workers have
         * %WORKER_UNBOUND set and concurrency management disabled, and may
         * be executing on any CPU. The pool behaves as an unbound one.
         *
         * Note that DISASSOCIATED should be flipped only while holding
         * wq_pool_attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         */
        POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */

        /* worker flags */
        WORKER_DIE              = 1 << 1,       /* die die die */
        WORKER_IDLE             = 1 << 2,       /* is idle */
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */

        /* any of these flags takes the worker out of concurrency management */
        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
                                  WORKER_UNBOUND | WORKER_REBOUND,

        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */

        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */

        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */

        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
                                                /* call for help after 10ms
                                                   (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */

        /*
         * Rescue workers are used only on emergencies and shared by
         * all cpus. Give MIN_NICE.
         */
        RESCUER_NICE_LEVEL      = MIN_NICE,
        HIGHPRI_NICE_LEVEL      = MIN_NICE,

        WQ_NAME_LEN             = 24,
};
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Modifiable by initialization/destruction paths and read-only for
* everyone else.
*
* P: Preemption protected. Disabling preemption is enough and should
* only be modified and accessed from the local cpu.
*
* L: pool->lock protected. Access with pool->lock held.
*
* K: Only modified by worker while holding pool->lock. Can be safely read by
* self, while holding pool->lock or from IRQ context if %current is the
* kworker.
*
* S: Only modified by worker self.
*
* A: wq_pool_attach_mutex protected.
*
* PL: wq_pool_mutex protected.
*
* PR: wq_pool_mutex protected for writes. RCU protected for reads.
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
* PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
* RCU for reads.
*
* WQ: wq->mutex protected.
*
* WR: wq->mutex protected for writes. RCU protected for reads.
*
* MD: wq_mayday_lock protected.
*
* WD: Used internally by the watchdog.
*/
/* struct worker is defined in workqueue_internal.h */
/* One shared worker pool; two per CPU (normal/highpri) plus unbound ones. */
struct worker_pool {
        raw_spinlock_t          lock;           /* the pool lock */
        int                     cpu;            /* I: the associated cpu */
        int                     node;           /* I: the associated node ID */
        int                     id;             /* I: pool ID */
        unsigned int            flags;          /* L: flags */

        unsigned long           watchdog_ts;    /* L: watchdog timestamp */
        bool                    cpu_stall;      /* WD: stalled cpu bound pool */

        /*
         * The counter is incremented in a process context on the associated CPU
         * w/ preemption disabled, and decremented or reset in the same context
         * but w/ pool->lock held. The readers grab pool->lock and are
         * guaranteed to see if the counter reached zero.
         */
        int                     nr_running;

        struct list_head        worklist;       /* L: list of pending works */

        int                     nr_workers;     /* L: total number of workers */
        int                     nr_idle;        /* L: currently idle workers */

        struct list_head        idle_list;      /* L: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct work_struct      idle_cull_work; /* L: worker idle cleanup */

        struct timer_list       mayday_timer;   /* L: SOS timer for workers */

        /* a worker is either on busy_hash or idle_list, or the manager */
        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
                                                /* L: hash of busy workers */

        struct worker           *manager;       /* L: purely informational */
        struct list_head        workers;        /* A: attached workers */
        struct list_head        dying_workers;  /* A: workers about to die */
        struct completion       *detach_completion; /* all workers detached */

        struct ida              worker_ida;     /* worker IDs for task name */

        struct workqueue_attrs  *attrs;         /* I: worker attributes */
        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
        int                     refcnt;         /* PL: refcnt for unbound pools */

        /*
         * Destruction of pool is RCU protected to allow dereferences
         * from get_work_pool().
         */
        struct rcu_head         rcu;
};
/*
* Per-pool_workqueue statistics. These can be monitored using
* tools/workqueue/wq_monitor.py.
*/
/* indices into pool_workqueue->stats[]; see wq_monitor.py for consumers */
enum pool_workqueue_stats {
        PWQ_STAT_STARTED,       /* work items started execution */
        PWQ_STAT_COMPLETED,     /* work items completed execution */
        PWQ_STAT_CPU_TIME,      /* total CPU time consumed */
        PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
        PWQ_STAT_CM_WAKEUP,     /* concurrency-management worker wakeups */
        PWQ_STAT_REPATRIATED,   /* unbound workers brought back into scope */
        PWQ_STAT_MAYDAY,        /* maydays to rescuer */
        PWQ_STAT_RESCUED,       /* linked work items executed by rescuer */

        PWQ_NR_STATS,           /* number of stats, keep last */
};
/*
* The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
* of work_struct->data are used for flags and the remaining high bits
* point to the pwq; thus, pwqs need to be aligned at two's power of the
* number of flag bits.
*/
/* Binds one workqueue to one worker_pool; pointed to by work->data while queued. */
struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        int                     work_color;     /* L: current color */
        int                     flush_color;    /* L: flushing color */
        int                     refcnt;         /* L: reference count */
        int                     nr_in_flight[WORK_NR_COLORS];
                                                /* L: nr of in_flight works */
        /*
         * nr_active management and WORK_STRUCT_INACTIVE:
         *
         * When pwq->nr_active >= max_active, new work item is queued to
         * pwq->inactive_works instead of pool->worklist and marked with
         * WORK_STRUCT_INACTIVE.
         *
         * All work items marked with WORK_STRUCT_INACTIVE do not participate
         * in pwq->nr_active and all work items in pwq->inactive_works are
         * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
         * work items are in pwq->inactive_works. Some of them are ready to
         * run in pool->worklist or worker->scheduled. Those work items are
         * only struct wq_barrier which is used for flush_work() and should
         * not participate in pwq->nr_active. For non-barrier work item, it
         * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
         */
        int                     nr_active;      /* L: nr of active works */
        int                     max_active;     /* L: max active works */
        struct list_head        inactive_works; /* L: inactive works */
        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
        struct list_head        mayday_node;    /* MD: node on wq->maydays */

        u64                     stats[PWQ_NR_STATS];

        /*
         * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
         * and pwq_release_workfn() for details. pool_workqueue itself is also
         * RCU protected so that the first pwq can be determined without
         * grabbing wq->mutex.
         */
        struct kthread_work     release_work;
        struct rcu_head         rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
/*
* Structure used to wait for workqueue flush.
*/
/* One waiter on flush_workqueue(); queued on wq->flusher_queue/overflow. */
struct wq_flusher {
        struct list_head        list;           /* WQ: list of flushers */
        int                     flush_color;    /* WQ: flush color waiting for */
        struct completion       done;           /* flush completion */
};
struct wq_device;
/*
* The externally visible workqueue. It relays the issued work items to
* the appropriate worker_pool through its pool_workqueues.
*/
struct workqueue_struct {
        struct list_head        pwqs;           /* WR: all pwqs of this wq */
        struct list_head        list;           /* PR: list of all workqueues */

        struct mutex            mutex;          /* protects this wq */
        int                     work_color;     /* WQ: current work color */
        int                     flush_color;    /* WQ: current flush color */
        atomic_t                nr_pwqs_to_flush; /* flush in progress */
        struct wq_flusher       *first_flusher; /* WQ: first flusher */
        struct list_head        flusher_queue;  /* WQ: flush waiters */
        struct list_head        flusher_overflow; /* WQ: flush overflow list */

        struct list_head        maydays;        /* MD: pwqs requesting rescue */
        struct worker           *rescuer;       /* MD: rescue worker */

        int                     nr_drainers;    /* WQ: drain in progress */
        int                     saved_max_active; /* WQ: saved pwq max_active */

        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
        struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
        struct wq_device        *wq_dev;        /* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
        char                    *lock_name;
        struct lock_class_key   key;
        struct lockdep_map      lockdep_map;
#endif
        char                    name[WQ_NAME_LEN]; /* I: workqueue name */

        /*
         * Destruction of workqueue_struct is RCU protected to allow walking
         * the workqueues list without grabbing wq_pool_mutex.
         * This is used to dump all workqueues from sysrq.
         */
        struct rcu_head         rcu;

        /* hot fields used during command issue, aligned to cacheline */
        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
        struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
};
/* slab cache for pool_workqueues; they need WORK_STRUCT_FLAG_BITS alignment */
static struct kmem_cache *pwq_cache;

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
        int                     nr_pods;        /* number of pods */
        cpumask_var_t           *pod_cpus;      /* pod -> cpus */
        int                     *pod_node;      /* pod -> node */
        int                     *cpu_pod;       /* cpu -> pod */
};

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

/* human-readable names for the affinity scopes, for sysfs/cmdline */
static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
        [WQ_AFFN_DFL]           = "default",
        [WQ_AFFN_CPU]           = "cpu",
        [WQ_AFFN_SMT]           = "smt",
        [WQ_AFFN_CACHE]         = "cache",
        [WQ_AFFN_NUMA]          = "numa",
        [WQ_AFFN_SYSTEM]        = "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
static unsigned int wq_cpu_intensive_warning_thresh = 4;
module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
#endif

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;                  /* can kworkers be created yet? */

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
static bool workqueue_freezing;         /* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* for further constrain wq_unbound_cpumask by cmdline parameter*/
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed. The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker;

/* the global system workqueues; see workqueue.h for usage guidance */
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(workqueue_execute_end);
/* lockdep check: pool lookup requires RCU read lock or wq_pool_mutex */
#define assert_rcu_or_pool_mutex()                                      \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU or wq_pool_mutex should be held")

/* lockdep check: pwq walk requires RCU, wq->mutex or wq_pool_mutex */
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                         !lockdep_is_held(&wq->mutex) &&                \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU, wq->mutex or wq_pool_mutex should be held")

/* iterate over both standard (normal and highpri) worker pools of @cpu */
#define for_each_cpu_worker_pool(pool, cpu)                             \
        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
             (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked. If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)                                         \
        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
                else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)                              \
        list_for_each_entry((worker), &(pool)->workers, node)           \
                if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
                else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)                                           \
        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,          \
                                lockdep_is_held(&(wq->mutex)))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static const struct debug_obj_descr work_debug_descr;
/* debugobjects hint: identify a work item by its callback function */
static void *work_debug_hint(void *addr)
{
        struct work_struct *work = addr;

        return work->func;
}
/* debugobjects: statically initialized works carry WORK_STRUCT_STATIC */
static bool work_is_static_object(void *addr)
{
        return test_bit(WORK_STRUCT_STATIC_BIT,
                        work_data_bits((struct work_struct *)addr));
}
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        if (state != ODEBUG_STATE_ACTIVE)
                return false;

        /* wait out the in-flight execution, then re-init the debug object */
        cancel_work_sync(work);
        debug_object_init(work, &work_debug_descr);
        return true;
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        if (state != ODEBUG_STATE_ACTIVE)
                return false;

        /* wait out the in-flight execution before releasing the tracking */
        cancel_work_sync(work);
        debug_object_free(work, &work_debug_descr);
        return true;
}
/* debugobjects descriptor wiring the fixup/query hooks above */
static const struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .debug_hint     = work_debug_hint,
        .is_static_object = work_is_static_object,
        .fixup_init     = work_fixup_init,
        .fixup_free     = work_fixup_free,
};

/* mark @work active in debugobjects tracking (queue time) */
static inline void debug_work_activate(struct work_struct *work)
{
        debug_object_activate(work, &work_debug_descr);
}

/* mark @work inactive in debugobjects tracking (dequeue/execution time) */
static inline void debug_work_deactivate(struct work_struct *work)
{
        debug_object_deactivate(work, &work_debug_descr);
}
/*
 * Register @work with debugobjects. On-stack items need the _on_stack
 * variant so automatic-storage objects aren't flagged as errors.
 */
void __init_work(struct work_struct *work, int onstack)
{
        if (!onstack)
                debug_object_init(work, &work_debug_descr);
        else
                debug_object_init_on_stack(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);
/* release debugobjects tracking for an on-stack work item */
void destroy_work_on_stack(struct work_struct *work)
{
        debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

/* release debugobjects tracking for an on-stack delayed work (timer + work) */
void destroy_delayed_work_on_stack(struct delayed_work *work)
{
        destroy_timer_on_stack(&work->timer);
        debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
        int id;

        lockdep_assert_held(&wq_pool_mutex);

        /* IDs must fit in the OFFQ pool-id field of work->data */
        id = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
                       GFP_KERNEL);
        if (id < 0)
                return id;

        pool->id = id;
        return 0;
}
/* encode a flush color into the flag portion of work->data */
static unsigned int work_color_to_flags(int color)
{
        return color << WORK_STRUCT_COLOR_SHIFT;
}
/* extract the flush color previously encoded by work_color_to_flags() */
static int get_work_color(unsigned long work_data)
{
        unsigned long color_mask = (1 << WORK_STRUCT_COLOR_BITS) - 1;

        return (work_data >> WORK_STRUCT_COLOR_SHIFT) & color_mask;
}
/* flush colors advance cyclically through [0, WORK_NR_COLORS) */
static int work_next_color(int color)
{
        return (color + 1) % WORK_NR_COLORS;
}
/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq. Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data. These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work. Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled. pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled. While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
                                 unsigned long flags)
{
        /* caller must own the work item, ie. PENDING must already be set */
        WARN_ON_ONCE(!work_pending(work));
        atomic_long_set(&work->data, data | flags | work_static(work));
}
/* encode the pwq pointer into work->data; implies the work item is queued */
static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                         unsigned long extra_flags)
{
        set_work_data(work, (unsigned long)pwq,
                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

/* record the last pool without releasing ownership (PENDING stays set) */
static void set_work_pool_and_keep_pending(struct work_struct *work,
                                           int pool_id)
{
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
                      WORK_STRUCT_PENDING);
}

/* record the last pool and release ownership by clearing PENDING */
static void set_work_pool_and_clear_pending(struct work_struct *work,
                                            int pool_id)
{
        /*
         * The following wmb is paired with the implied mb in
         * test_and_set_bit(PENDING) and ensures all updates to @work made
         * here are visible to and precede any updates by the next PENDING
         * owner.
         */
        smp_wmb();
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
        /*
         * The following mb guarantees that previous clear of a PENDING bit
         * will not be reordered with any speculative LOADS or STORES from
         * work->current_func, which is executed afterwards. This possible
         * reordering can lead to a missed execution on attempt to queue
         * the same @work. E.g. consider this case:
         *
         *   CPU#0                         CPU#1
         *   ----------------------------  --------------------------------
         *
         * 1  STORE event_indicated
         * 2  queue_work_on() {
         * 3    test_and_set_bit(PENDING)
         * 4 }                             set_..._and_clear_pending() {
         * 5                                 set_work_data() # clear bit
         * 6                                 smp_mb()
         * 7                               work->current_func() {
         * 8                                  LOAD event_indicated
         *                                 }
         *
         * Without an explicit full barrier speculative LOAD on line 8 can
         * be executed before CPU#0 does STORE on line 1. If that happens,
         * CPU#0 observes the PENDING bit is still set and new execution of
         * a @work is not queued in a hope, that CPU#1 will eventually
         * finish the queued @work. Meanwhile CPU#1 does not see
         * event_indicated is set, because speculative LOAD was executed
         * before actual STORE.
         */
        smp_mb();
}

/* drop pool association entirely; used when a work is sync canceled */
static void clear_work_data(struct work_struct *work)
{
        smp_wmb();      /* see set_work_pool_and_clear_pending() */
        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

/* mask off the flag bits of @data; the remainder is the pwq pointer */
static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
        return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}
/* return the pwq @work is queued on, or NULL if it isn't queued */
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        /* the pwq pointer is only encoded while the work item is queued */
        return (data & WORK_STRUCT_PWQ) ? work_struct_pwq(data) : NULL;
}
/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allows read
 * access under RCU read lock. As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect. If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with. %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);
        int pool_id;

        assert_rcu_or_pool_mutex();

        /* while queued, the pool is reachable directly through the pwq */
        if (data & WORK_STRUCT_PWQ)
                return work_struct_pwq(data)->pool;

        /* off-queue: the high bits hold the last pool's ID */
        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
        if (pool_id == WORK_OFFQ_POOL_NONE)
                return NULL;

        return idr_find(&worker_pool_idr, pool_id);
}
/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        /* while queued, go through the encoded pwq; otherwise decode OFFQ bits */
        return (data & WORK_STRUCT_PWQ) ?
                work_struct_pwq(data)->pool->id :
                (int)(data >> WORK_OFFQ_POOL_SHIFT);
}
/* flag @work as being canceled while keeping its pool association and PENDING */
static void mark_work_canceling(struct work_struct *work)
{
        unsigned long pool_id = get_work_pool_id(work);

        set_work_data(work,
                      (pool_id << WORK_OFFQ_POOL_SHIFT) | WORK_OFFQ_CANCELING,
                      WORK_STRUCT_PENDING);
}
static bool work_is_canceling(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}
/*
* Policy functions. These define the policies on how the global worker
* pools are managed. Unless noted otherwise, these functions assume that
* they're being called with pool->lock held.
*/
/*
* Need to wake up a worker? Called from anything but currently
* running workers.
*
* Note that, because unbound workers never contribute to nr_running, this
* function will always return %true for unbound pools as long as the
* worklist isn't empty.
*/
static bool need_more_worker(struct worker_pool *pool)
{
return !list_empty(&pool->worklist) && !pool->nr_running;
}
/* Can I start working? Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
        /* need at least one idle worker available to take over */
        return pool->nr_idle != 0;
}
/* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}
/* Do we need a new worker? Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
return need_more_worker(pool) && !may_start_working(pool);
}
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
        int nr_idle = pool->nr_idle;
        int nr_busy;

        /* an actively managing worker counts as idle for this check */
        if (pool->flags & POOL_MANAGER_ACTIVE)
                nr_idle++;

        nr_busy = pool->nr_workers - nr_idle;

        /* allow two spare idles plus 1/MAX_IDLE_WORKERS_RATIO of the busy count */
        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
/**
* worker_set_flags - set worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to set
*
* Set @flags in @worker->flags and adjust nr_running accordingly.
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
struct worker_pool *pool = worker->pool;
lockdep_assert_held(&pool->lock);
/* If transitioning into NOT_RUNNING, adjust nr_running. */
if ((flags & WORKER_NOT_RUNNING) &&
!(worker->flags & WORKER_NOT_RUNNING)) {
pool->nr_running--;
}
worker->flags |= flags;
}
/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
        struct worker_pool *pool = worker->pool;
        /* snapshot flags before clearing so the transition can be detected */
        unsigned int oflags = worker->flags;

        lockdep_assert_held(&pool->lock);

        worker->flags &= ~flags;

        /*
         * If transitioning out of NOT_RUNNING, increment nr_running. Note
         * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
         * of multiple flags, not a single flag.
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
                        pool->nr_running++;
}
/* Return the first idle worker. Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
        struct worker *worker = NULL;

        /* idle_list is LIFO, so the head is the most recently idled worker */
        if (!list_empty(&pool->idle_list))
                worker = list_first_entry(&pool->idle_list, struct worker, entry);

        return worker;
}
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state. Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
        struct worker_pool *pool = worker->pool;

        /* refuse double-idling or entering idle while still hashed as busy */
        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
            WARN_ON_ONCE(!list_empty(&worker->entry) &&
                         (worker->hentry.next || worker->hentry.pprev)))
                return;

        /* can't use worker_set_flags(), also called from create_worker() */
        worker->flags |= WORKER_IDLE;
        pool->nr_idle++;
        worker->last_active = jiffies;

        /* idle_list is LIFO */
        list_add(&worker->entry, &pool->idle_list);

        /* arm the culling timer if the pool now has excess idle workers */
        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

        /* Sanity check nr_running. */
        WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}
/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state. Update stats.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
        struct worker_pool *pool = worker->pool;

        /* leaving idle without being idle indicates state corruption */
        if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
                return;
        worker_clr_flags(worker, WORKER_IDLE);
        pool->nr_idle--;
        list_del_init(&worker->entry);
}
/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work. For a worker
 * to match, its current execution should match the address of @work and
 * its work function. This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky. A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item. If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives. Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item. Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
                                                 struct work_struct *work)
{
        struct worker *worker;

        /* match both the item address and function; see comment above */
        hash_for_each_possible(pool->busy_hash, worker, hentry,
                               (unsigned long)work)
                if (worker->current_work == work &&
                    worker->current_func == work->func)
                        return worker;

        return NULL;
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head. Work series to be
 * scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
 * @nextp.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head. This is safe because the loop breaks on
	 * the first entry without WORK_STRUCT_LINKED, so the (bogus) NULL
	 * sentinel is never dereferenced.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}
/**
 * assign_work - assign a work item and its linked work items to a worker
 * @work: work to assign
 * @worker: worker to assign to
 * @nextp: out parameter for nested worklist walking
 *
 * Assign @work and its linked work items to @worker. If @work is already being
 * executed by another worker in the same pool, it'll be punted there.
 *
 * If @nextp is not NULL, it's updated to point to the next work of the last
 * scheduled work. This allows assign_work() to be nested inside
 * list_for_each_entry_safe().
 *
 * Returns %true if @work was successfully assigned to @worker. %false if @work
 * was punted to another worker already executing it.
 */
static bool assign_work(struct work_struct *work, struct worker *worker,
			struct work_struct **nextp)
{
	struct worker_pool *pool = worker->pool;
	struct worker *collision;

	lockdep_assert_held(&pool->lock);

	/*
	 * A single work shouldn't be executed concurrently by multiple workers.
	 * __queue_work() ensures that @work doesn't jump to a different pool
	 * while still running in the previous pool. Here, we should ensure that
	 * @work is not executed concurrently by multiple workers from the same
	 * pool. Check whether anyone is already processing the work. If so,
	 * defer the work to the currently executing one.
	 */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		/* punt @work (and anything LINKED to it) to the executing worker */
		move_linked_works(work, &collision->scheduled, nextp);
		return false;
	}

	move_linked_works(work, &worker->scheduled, nextp);
	return true;
}
/**
 * kick_pool - wake up an idle worker if necessary
 * @pool: pool to kick
 *
 * @pool may have pending work items. Wake up worker if necessary. Returns
 * whether a worker was woken up.
 */
static bool kick_pool(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);
	struct task_struct *p;

	lockdep_assert_held(&pool->lock);

	if (!need_more_worker(pool) || !worker)
		return false;

	p = worker->task;

#ifdef CONFIG_SMP
	/*
	 * Idle @worker is about to execute @work and waking up provides an
	 * opportunity to migrate @worker at a lower cost by setting the task's
	 * wake_cpu field. Let's see if we want to move @worker to improve
	 * execution locality.
	 *
	 * We're waking the worker that went idle the latest and there's some
	 * chance that @worker is marked idle but hasn't gone off CPU yet. If
	 * so, setting the wake_cpu won't do anything. As this is a best-effort
	 * optimization and the race window is narrow, let's leave as-is for
	 * now. If this becomes pronounced, we can skip over workers which are
	 * still on cpu when picking an idle worker.
	 *
	 * If @pool has non-strict affinity, @worker might have ended up outside
	 * its affinity scope. Repatriate.
	 *
	 * NOTE(review): list_first_entry() below assumes a non-empty worklist;
	 * presumably guaranteed by need_more_worker() returning %true above —
	 * confirm against its definition.
	 */
	if (!pool->attrs->affn_strict &&
	    !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
		struct work_struct *work = list_first_entry(&pool->worklist,
						struct work_struct, entry);
		int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
							  cpu_online_mask);
		if (wake_cpu < nr_cpu_ids) {
			p->wake_cpu = wake_cpu;
			get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
		}
	}
#endif
	/* Android vendor hook: observe which work the woken worker is picked for */
	trace_android_vh_wq_wake_idle_worker(p, list_first_entry(&pool->worklist,
								 struct work_struct, entry));
	wake_up_process(p);
	return true;
}
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
/*
 * Concurrency-managed per-cpu work items that hog CPU for longer than
 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
 * which prevents them from stalling other concurrency-managed work items. If a
 * work function keeps triggering this mechanism, it's likely that the work item
 * should be using an unbound workqueue instead.
 *
 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
 * and report them so that they can be examined and converted to use unbound
 * workqueues as appropriate. To avoid flooding the console, each violating work
 * function is tracked and reported with exponential backoff.
 */
#define WCI_MAX_ENTS 128

/* one tracked violator: the work function and how many times it tripped */
struct wci_ent {
	work_func_t		func;
	atomic64_t		cnt;
	struct hlist_node	hash_node;
};

/* fixed-size backing store; entries are never freed once handed out */
static struct wci_ent wci_ents[WCI_MAX_ENTS];
static int wci_nr_ents;
/* protects wci_ents[]/wci_nr_ents insertion; lookups are RCU */
static DEFINE_RAW_SPINLOCK(wci_lock);
static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
/* RCU-safe lookup of the tracking entry for @func, %NULL when untracked */
static struct wci_ent *wci_find_ent(work_func_t func)
{
	struct wci_ent *e;

	hash_for_each_possible_rcu(wci_hash, e, hash_node, (unsigned long)func)
		if (e->func == func)
			return e;

	return NULL;
}
static void wq_cpu_intensive_report(work_func_t func)
{
	struct wci_ent *ent;

restart:
	/* lockless RCU lookup first; an existing entry is the common case */
	ent = wci_find_ent(func);
	if (ent) {
		u64 cnt;

		/*
		 * Start reporting from the warning_thresh and back off
		 * exponentially.
		 */
		cnt = atomic64_inc_return_relaxed(&ent->cnt);
		if (wq_cpu_intensive_warning_thresh &&
		    cnt >= wq_cpu_intensive_warning_thresh &&
		    is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh))
			printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
					ent->func, wq_cpu_intensive_thresh_us,
					atomic64_read(&ent->cnt));
		return;
	}

	/*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
	if (wci_nr_ents >= WCI_MAX_ENTS)
		return;

	raw_spin_lock(&wci_lock);

	/* recheck under the lock; another CPU may have exhausted the table */
	if (wci_nr_ents >= WCI_MAX_ENTS) {
		raw_spin_unlock(&wci_lock);
		return;
	}

	/* another CPU may have added @func while we were unlocked; retry */
	if (wci_find_ent(func)) {
		raw_spin_unlock(&wci_lock);
		goto restart;
	}

	ent = &wci_ents[wci_nr_ents++];
	ent->func = func;
	atomic64_set(&ent->cnt, 0);
	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

	raw_spin_unlock(&wci_lock);

	/* entry is published; take the fast path to count this violation */
	goto restart;
}
#else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
/* reporting compiled out; keep a no-op so callers need no ifdefs */
static void wq_cpu_intensive_report(work_func_t func) {}
#endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule().
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	/* only undo accounting if wq_worker_sleeping() ran for this sleep */
	if (!READ_ONCE(worker->sleeping))
		return;

	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
		worker->pool->nr_running++;
	preempt_enable();

	/*
	 * CPU intensive auto-detection cares about how long a work item hogged
	 * CPU without sleeping. Reset the starting timestamp on wakeup.
	 */
	worker->current_at = worker->task->se.sum_exec_runtime;

	WRITE_ONCE(worker->sleeping, 0);
}
/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;

	pool = worker->pool;

	/* Return if preempted before wq_worker_running() was reached */
	if (READ_ONCE(worker->sleeping))
		return;

	/* mark before touching nr_running; cleared by wq_worker_running() */
	WRITE_ONCE(worker->sleeping, 1);
	raw_spin_lock_irq(&pool->lock);

	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
	}

	pool->nr_running--;
	/* going off CPU may leave the pool short; wake an idle worker */
	if (kick_pool(pool))
		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock_irq(&pool->lock);
}
/**
 * wq_worker_tick - a scheduler tick occurred while a kworker is running
 * @task: task currently running
 *
 * Called from scheduler_tick(). We're in the IRQ context and the current
 * worker's fields which follow the 'K' locking rule can be accessed safely.
 */
void wq_worker_tick(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct pool_workqueue *pwq = worker->current_pwq;
	struct worker_pool *pool = worker->pool;

	/* no current pwq means the worker is between work items */
	if (!pwq)
		return;

	pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;

	/* a threshold of 0 disables CPU_INTENSIVE auto-detection */
	if (!wq_cpu_intensive_thresh_us)
		return;

	/*
	 * If the current worker is concurrency managed and hogged the CPU for
	 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
	 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
	 *
	 * A set @worker->sleeping means that @worker is in the process of
	 * switching out voluntarily and won't be contributing to
	 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
	 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
	 * double decrements. The task is releasing the CPU anyway. Let's skip.
	 * We probably want to make this prettier in the future.
	 */
	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
	    worker->task->se.sum_exec_runtime - worker->current_at <
	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
		return;

	raw_spin_lock(&pool->lock);

	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
	wq_cpu_intensive_report(worker->current_func);
	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;

	/* this worker no longer counts for concurrency; wake a replacement */
	if (kick_pool(pool))
		pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock(&pool->lock);
}
/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed. This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * raw_spin_lock_irq(rq->lock)
 *
 * Called during schedule() when a kworker is going to sleep; used by psi
 * to identify aggregation workers during dequeuing, allowing periodic
 * aggregation to shut off when such a worker is the last task in the
 * system or cgroup to go to sleep.
 *
 * As no workqueue locks are involved, the value is only stable when
 * called from inside the scheduler's queuing and dequeuing paths, when
 * @task, which must be a kworker, is guaranteed to not be processing any
 * works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
	return ((struct worker *)kthread_data(task))->last_func;
}
/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Bump @pwq's refcnt. The caller must already hold a reference (i.e.
 * refcnt > 0) and the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);

	/* reviving a dead pwq is a bug */
	WARN_ON_ONCE(pwq->refcnt <= 0);

	pwq->refcnt++;
}
/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq and schedule its destruction once the refcnt
 * hits zero. The caller must hold the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);

	if (--pwq->refcnt)
		return;

	/*
	 * @pwq can't be released under pool->lock, bounce to a dedicated
	 * kthread_worker to avoid A-A deadlocks.
	 */
	kthread_queue_work(pwq_release_worker, &pwq->release_work);
}
/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking. This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
	if (!pwq)
		return;

	/*
	 * As both pwqs and pools are RCU protected, the
	 * following lock operations are safe.
	 */
	raw_spin_lock_irq(&pwq->pool->lock);
	put_pwq(pwq);
	raw_spin_unlock_irq(&pwq->pool->lock);
}
static void pwq_activate_inactive_work(struct work_struct *work)
{
struct pool_workqueue *pwq = get_work_pwq(work);
trace_workqueue_activate_work(work);
if (list_empty(&pwq->pool->worklist))
pwq->pool->watchdog_ts = jiffies;
move_linked_works(work, &pwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
pwq->nr_active++;
}
/* activate the oldest entry on @pwq->inactive_works */
static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
{
	pwq_activate_inactive_work(list_first_entry(&pwq->inactive_works,
						    struct work_struct, entry));
}
/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @work_data: work_data of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
{
	int color = get_work_color(work_data);

	/* inactive works never contributed to nr_active */
	if (!(work_data & WORK_STRUCT_INACTIVE)) {
		pwq->nr_active--;
		if (!list_empty(&pwq->inactive_works)) {
			/* one down, submit an inactive one */
			if (pwq->nr_active < pwq->max_active)
				pwq_activate_first_inactive(pwq);
		}
	}

	pwq->nr_in_flight[color]--;

	/* is flush in progress and are we at the flushing tip? */
	if (likely(pwq->flush_color != color))
		goto out_put;

	/* are there still in-flight works? */
	if (pwq->nr_in_flight[color])
		goto out_put;

	/* this pwq is done, clear flush_color */
	pwq->flush_color = -1;

	/*
	 * If this was the last pwq, wake up the first flusher. It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
		complete(&pwq->wq->first_flusher->done);
out_put:
	/* drop the per-work reference taken in insert_work() */
	put_pwq(pwq);
}
/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work. This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *
 * ========	================================================================
 * 1		if @work was pending and we successfully stole PENDING
 * 0		if @work was idle and we claimed PENDING
 * -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 * -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 * ========	================================================================
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry. This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 * On failure (< 0), irq state has already been restored.
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe. If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	rcu_read_lock();
	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	pool = get_work_pool(work);
	if (!pool)
		goto fail;

	raw_spin_lock(&pool->lock);
	/*
	 * work->data is guaranteed to point to pwq only while the work
	 * item is queued on pwq->wq, and both updating work->data to point
	 * to pwq on queueing and to pool on dequeueing are done under
	 * pwq->pool->lock. This in turn guarantees that, if work->data
	 * points to pwq which is associated with a locked pool, the work
	 * item is currently queued on that pool.
	 */
	pwq = get_work_pwq(work);
	if (pwq && pwq->pool == pool) {
		debug_work_deactivate(work);

		/*
		 * A cancelable inactive work item must be in the
		 * pwq->inactive_works since a queued barrier can't be
		 * canceled (see the comments in insert_wq_barrier()).
		 *
		 * An inactive work item cannot be grabbed directly because
		 * it might have linked barrier work items which, if left
		 * on the inactive_works list, will confuse pwq->nr_active
		 * management later on and cause stall. Make sure the work
		 * item is activated before grabbing.
		 */
		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
			pwq_activate_inactive_work(work);

		list_del_init(&work->entry);
		pwq_dec_nr_in_flight(pwq, *work_data_bits(work));

		/* work->data points to pwq iff queued, point to pool */
		set_work_pool_and_keep_pending(work, pool->id);

		raw_spin_unlock(&pool->lock);
		rcu_read_unlock();
		return 1;
	}
	raw_spin_unlock(&pool->lock);
fail:
	rcu_read_unlock();
	local_irq_restore(*flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
	return -EAGAIN;
}
/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	debug_work_activate(work);

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack_noalloc(work);

	/* we own @work, set data and link */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);

	/* hold a pwq ref for the queued work; dropped in pwq_dec_nr_in_flight() */
	get_pwq(pwq);
}
/*
* Test whether @work is being queued from another work executing on the
* same workqueue.
*/
static bool is_chained_work(struct workqueue_struct *wq)
{
struct worker *worker;
worker = current_wq_worker();
/*
* Return %true iff I'm a worker executing a work item on @wq. If
* I'm @worker, it's safe to dereference it without locking.
*/
return worker && worker->current_pwq->wq == wq;
}
/*
 * When queueing an unbound work item to a wq, prefer local CPU if allowed
 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
 * avoid perturbing sensitive tasks.
 */
static int wq_select_unbound_cpu(int cpu)
{
	int new_cpu;

	if (likely(!wq_debug_force_rr_cpu)) {
		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
			return cpu;
	} else {
		pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
	}

	/* round-robin: continue from where this CPU last left off */
	new_cpu = __this_cpu_read(wq_rr_cpu_last);
	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
	if (unlikely(new_cpu >= nr_cpu_ids)) {
		/* wrapped around; restart from the first allowed online CPU */
		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
		if (unlikely(new_cpu >= nr_cpu_ids))
			return cpu;
	}
	__this_cpu_write(wq_rr_cpu_last, new_cpu);

	return new_cpu;
}
static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *last_pool, *pool;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING. Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * For a draining wq, only works from the same workqueue are
	 * allowed. The __WQ_DESTROYING helps to spot the issue that
	 * queues a new work item to a wq after destroy_workqueue(wq).
	 */
	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
		     WARN_ON_ONCE(!is_chained_work(wq))))
		return;

	rcu_read_lock();
retry:
	/* pwq which will be used unless @work is executing elsewhere */
	if (req_cpu == WORK_CPU_UNBOUND) {
		if (wq->flags & WQ_UNBOUND)
			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
		else
			cpu = raw_smp_processor_id();
	}

	pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
	pool = pwq->pool;

	/*
	 * If @work was previously on a different pool, it might still be
	 * running there, in which case the work needs to be queued on that
	 * pool to guarantee non-reentrancy.
	 */
	last_pool = get_work_pool(work);
	if (last_pool && last_pool != pool) {
		struct worker *worker;

		raw_spin_lock(&last_pool->lock);

		worker = find_worker_executing_work(last_pool, work);

		if (worker && worker->current_pwq->wq == wq) {
			/* still executing on @last_pool; queue there instead */
			pwq = worker->current_pwq;
			pool = pwq->pool;
			WARN_ON_ONCE(pool != last_pool);
		} else {
			/* meh... not running there, queue here */
			raw_spin_unlock(&last_pool->lock);
			raw_spin_lock(&pool->lock);
		}
	} else {
		raw_spin_lock(&pool->lock);
	}

	/*
	 * pwq is determined and locked. For unbound pools, we could have raced
	 * with pwq release and it could already be dead. If its refcnt is zero,
	 * repeat pwq selection. Note that unbound pwqs never die without
	 * another pwq replacing it in cpu_pwq or while work items are executing
	 * on it, so the retrying is guaranteed to make forward-progress.
	 */
	if (unlikely(!pwq->refcnt)) {
		if (wq->flags & WQ_UNBOUND) {
			raw_spin_unlock(&pool->lock);
			cpu_relax();
			goto retry;
		}
		/* oops - per-cpu pwqs should never be released */
		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
			  wq->name, cpu);
	}

	/* pwq determined, queue */
	trace_workqueue_queue_work(req_cpu, pwq, work);
	/* Android vendor hook: observe every queued work */
	trace_android_vh_wq_queue_work(work, wq->name, wq->flags, cpu);

	if (WARN_ON(!list_empty(&work->entry)))
		goto out;

	pwq->nr_in_flight[pwq->work_color]++;
	work_flags = work_color_to_flags(pwq->work_color);

	/* room under max_active? make it runnable now, else park it inactive */
	if (likely(pwq->nr_active < pwq->max_active)) {
		if (list_empty(&pool->worklist))
			pool->watchdog_ts = jiffies;

		trace_workqueue_activate_work(work);
		pwq->nr_active++;
		insert_work(pwq, work, &pool->worklist, work_flags);
		kick_pool(pool);
	} else {
		work_flags |= WORK_STRUCT_INACTIVE;
		insert_work(pwq, work, &pwq->inactive_works, work_flags);
	}

out:
	raw_spin_unlock(&pool->lock);
	rcu_read_unlock();
}
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away. Callers that fail to ensure that the specified
 * CPU cannot go away will execute on a randomly chosen CPU.
 * But note well that callers specifying a CPU that never has been
 * online will get a splat.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	unsigned long flags;
	bool queued = false;

	/* PENDING must be claimed and the work queued with irqs off */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		queued = true;
	}

	local_irq_restore(flags);
	return queued;
}
EXPORT_SYMBOL(queue_work_on);
/**
 * select_numa_node_cpu - Select a CPU based on NUMA node
 * @node: NUMA node ID that we want to select a CPU from
 *
 * This function will attempt to find a "random" cpu available on a given
 * node. If there are no CPUs available on the given node it will return
 * WORK_CPU_UNBOUND indicating that we should just schedule to any
 * available CPU if we need to schedule this work.
 */
static int select_numa_node_cpu(int node)
{
	int cpu = raw_smp_processor_id();

	/* Delay binding to CPU if node is not valid or online */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
		return WORK_CPU_UNBOUND;

	/* Already running on the right node? Stay local. */
	if (cpu_to_node(cpu) == node)
		return cpu;

	/* Use "random", otherwise known as "first", online CPU of node */
	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	/* If CPU is valid return that, otherwise just defer */
	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
}
/**
 * queue_work_node - queue work on a "random" cpu for a given NUMA node
 * @node: NUMA node that we are targeting the work for
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a "random" CPU within a given NUMA node. The basic
 * idea here is to provide a way to somehow associate work with a given
 * NUMA node.
 *
 * This function will only make a best effort attempt at getting this onto
 * the right NUMA node. If no node is requested or the requested node is
 * offline then we just fall back to standard queue_work behavior.
 *
 * Currently the "random" CPU ends up being the first available CPU in the
 * intersection of cpu_online_mask and the cpumask of the node, unless we
 * are running on the node. In that case we just use the current CPU.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_node(int node, struct workqueue_struct *wq,
		     struct work_struct *work)
{
	unsigned long flags;
	bool queued = false;

	/*
	 * This current implementation is specific to unbound workqueues.
	 * Specifically we only return the first available CPU for a given
	 * node instead of cycling through individual CPUs within the node.
	 *
	 * If this is used with a per-cpu workqueue then the logic in
	 * workqueue_select_cpu_near would need to be updated to allow for
	 * some round robin type logic.
	 */
	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));

	/* PENDING must be claimed and the work queued with irqs off */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(select_numa_node_cpu(node), wq, work);
		queued = true;
	}

	local_irq_restore(flags);
	return queued;
}
EXPORT_SYMBOL_GPL(queue_work_node);
/* timer callback armed by __queue_delayed_work(); queues the inner work */
void delayed_work_timer_fn(struct timer_list *t)
{
	struct delayed_work *dw = from_timer(dw, t, timer);

	/* irqsafe timer => irqs already off, as __queue_work() requires */
	__queue_work(dw->cpu, dw->wq, &dw->work);
}
EXPORT_SYMBOL(delayed_work_timer_fn);
static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	WARN_ON_ONCE(!wq);
	/* the timer must have been initialized with delayed_work_timer_fn */
	WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
	/* caller owns PENDING, so neither timer nor work may be in flight */
	WARN_ON_ONCE(timer_pending(timer));
	WARN_ON_ONCE(!list_empty(&work->entry));

	/*
	 * If @delay is 0, queue @dwork->work immediately. This is for
	 * both optimization and correctness. The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		__queue_work(cpu, wq, &dwork->work);
		return;
	}

	/* stash the target for delayed_work_timer_fn() to use on expiry */
	dwork->wq = wq;
	dwork->cpu = cpu;
	timer->expires = jiffies + delay;

	/* arm on the requested CPU when one was given, else anywhere */
	if (unlikely(cpu != WORK_CPU_UNBOUND))
		add_timer_on(timer, cpu);
	else
		add_timer(timer);
}
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Return: %false if @work was already on a queue, %true otherwise. If
 * @delay is zero and @dwork is idle, it will be scheduled for immediate
 * execution.
 */
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	unsigned long flags;
	bool queued = false;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		queued = true;
	}

	local_irq_restore(flags);
	return queued;
}
EXPORT_SYMBOL(queue_delayed_work_on);
/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is
 * zero, @work is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Return: %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context including IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	/* busy-retry while someone else transiently holds PENDING */
	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/*
	 * -ENOENT from try_to_grab_pending() becomes %true. On that path
	 * irqs were already restored inside try_to_grab_pending().
	 */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);
/* RCU callback for queue_rcu_work(): queue the work once the grace period ends */
static void rcu_work_rcufn(struct rcu_head *rcu)
{
	struct rcu_work *rw = container_of(rcu, struct rcu_work, rcu);

	/* read the comment in __queue_work() */
	local_irq_disable();
	__queue_work(WORK_CPU_UNBOUND, rw->wq, &rw->work);
	local_irq_enable();
}
/**
 * queue_rcu_work - queue work after a RCU grace period
 * @wq: workqueue to use
 * @rwork: work to queue
 *
 * Return: %false if @rwork was already pending, %true otherwise. Note
 * that a full RCU grace period is guaranteed only after a %true return.
 * While @rwork is guaranteed to be executed after a %false return, the
 * execution may happen before a full RCU grace period has passed.
 */
bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
{
	struct work_struct *work = &rwork->work;

	/* already pending? nothing to do */
	if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return false;

	rwork->wq = wq;
	call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
	return true;
}
EXPORT_SYMBOL(queue_rcu_work);
static struct worker *alloc_worker(int node)
{
struct worker *worker;
worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
INIT_LIST_HEAD(&worker->node);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
return worker;
}
static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
{
if (pool->cpu < 0 && pool->attrs->affn_strict)
return pool->attrs->__pod_cpumask;
else
return pool->attrs->cpumask;
}
/**
 * worker_attach_to_pool() - attach a worker to a pool
 * @worker: worker to be attached
 * @pool: the target pool
 *
 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
 * cpu-binding of @worker are kept coordinated with the pool across
 * cpu-[un]hotplugs.
 */
static void worker_attach_to_pool(struct worker *worker,
				  struct worker_pool *pool)
{
	mutex_lock(&wq_pool_attach_mutex);

	/*
	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
	 * stable across this function. See the comments above the flag
	 * definition for details.
	 */
	if (pool->flags & POOL_DISASSOCIATED)
		worker->flags |= WORKER_UNBOUND;
	else
		kthread_set_per_cpu(worker->task, pool->cpu);

	/*
	 * Only rescuers need their affinity updated here; regular workers
	 * are bound via kthread_bind_mask() in create_worker().
	 */
	if (worker->rescue_wq)
		set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));

	list_add_tail(&worker->node, &pool->workers);
	worker->pool = pool;

	mutex_unlock(&wq_pool_attach_mutex);
}
/**
 * worker_detach_from_pool() - detach a worker from its pool
 * @worker: worker which is attached to its pool
 *
 * Undo the attaching which had been done in worker_attach_to_pool(). The
 * caller worker shouldn't access to the pool after detached except it has
 * other reference to the pool.
 */
static void worker_detach_from_pool(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct completion *detach_completion = NULL;

	mutex_lock(&wq_pool_attach_mutex);

	kthread_set_per_cpu(worker->task, -1);
	list_del(&worker->node);
	worker->pool = NULL;

	/* last worker out completes whoever is waiting for pool teardown */
	if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
		detach_completion = pool->detach_completion;

	mutex_unlock(&wq_pool_attach_mutex);

	/* clear leftover flags without pool->lock after it is detached */
	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);

	if (detach_completion)
		complete(detach_completion);
}
/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 *
 * Create and start a new worker which is attached to @pool.
 *
 * CONTEXT:
 * Might sleep. Does GFP_KERNEL allocations.
 *
 * Return:
 * Pointer to the newly created worker, %NULL on failure.
 */
static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker;
	int id;
	char id_buf[23];

	/* ID is needed to determine kthread name */
	id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
	if (id < 0) {
		pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
			    ERR_PTR(id));
		return NULL;
	}

	worker = alloc_worker(pool->node);
	if (!worker) {
		pr_err_once("workqueue: Failed to allocate a worker\n");
		goto fail;
	}

	worker->id = id;

	/* per-cpu workers are named "N:M[H]", unbound ones "uN:M" */
	if (pool->cpu >= 0)
		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
			 pool->attrs->nice < 0  ? "H" : "");
	else
		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);

	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
					      "kworker/%s", id_buf);
	if (IS_ERR(worker->task)) {
		if (PTR_ERR(worker->task) == -EINTR) {
			/* creation interrupted by a fatal signal; not a bug */
			pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
			       id_buf);
		} else {
			pr_err_once("workqueue: Failed to create a worker thread: %pe",
				    worker->task);
		}
		goto fail;
	}

	set_user_nice(worker->task, pool->attrs->nice);
	trace_android_rvh_create_worker(worker->task, pool->attrs);
	kthread_bind_mask(worker->task, pool_allowed_cpus(pool));

	/* successful, attach the worker to the pool */
	worker_attach_to_pool(worker, pool);

	/* start the newly created worker */
	raw_spin_lock_irq(&pool->lock);

	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	kick_pool(pool);

	/*
	 * @worker is waiting on a completion in kthread() and will trigger hung
	 * check if not woken up soon. As kick_pool() might not have waken it
	 * up, wake it up explicitly once more.
	 */
	wake_up_process(worker->task);

	raw_spin_unlock_irq(&pool->lock);

	return worker;

fail:
	/* safe: @worker is NULL if alloc_worker() failed; kfree(NULL) is a no-op */
	ida_free(&pool->worker_ida, id);
	kfree(worker);
	return NULL;
}
/*
 * Release @worker's kthread from its per-cpu binding and move it to the
 * unbound cpumask, falling back to cpu_possible_mask when no unbound CPU
 * is currently active.  Caller must hold wq_pool_attach_mutex.
 */
static void unbind_worker(struct worker *worker)
{
	const struct cpumask *mask;

	lockdep_assert_held(&wq_pool_attach_mutex);

	kthread_set_per_cpu(worker->task, -1);
	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
		mask = wq_unbound_cpumask;
	else
		mask = cpu_possible_mask;
	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, mask) < 0);
}
/*
 * wake_dying_workers - unbind and wake every worker on @cull_list
 * @cull_list: workers tagged by set_worker_dying(), linked via ->entry
 *
 * Called by idle_cull_fn() with wq_pool_attach_mutex held but without
 * pool->lock.  Each woken worker observes %WORKER_DIE and self-destructs
 * in worker_thread().
 */
static void wake_dying_workers(struct list_head *cull_list)
{
	struct worker *worker, *tmp;

	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
		list_del_init(&worker->entry);
		unbind_worker(worker);
		/*
		 * If the worker was somehow already running, then it had to be
		 * in pool->idle_list when set_worker_dying() happened or we
		 * wouldn't have gotten here.
		 *
		 * Thus, the worker must either have observed the WORKER_DIE
		 * flag, or have set its state to TASK_IDLE. Either way, the
		 * below will be observed by the worker and is safe to do
		 * outside of pool->lock.
		 */
		wake_up_process(worker->task);
	}
}
/**
 * set_worker_dying - Tag a worker for destruction
 * @worker: worker to be destroyed
 * @list: transfer worker away from its pool->idle_list and into list
 *
 * Tag @worker for destruction and adjust @pool stats accordingly. The worker
 * should be idle.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void set_worker_dying(struct worker *worker, struct list_head *list)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);
	lockdep_assert_held(&wq_pool_attach_mutex);

	/* sanity check frenzy */
	if (WARN_ON(worker->current_work) ||
	    WARN_ON(!list_empty(&worker->scheduled)) ||
	    WARN_ON(!(worker->flags & WORKER_IDLE)))
		return;

	pool->nr_workers--;
	pool->nr_idle--;

	worker->flags |= WORKER_DIE;

	/* hand the worker to the culler via @list ... */
	list_move(&worker->entry, list);
	/* ... and park it on dying_workers until it detaches itself */
	list_move(&worker->node, &pool->dying_workers);
}
/**
 * idle_worker_timeout - check if some idle workers can now be deleted.
 * @t: The pool's idle_timer that just expired
 *
 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
 * worker_leave_idle(), as a worker flicking between idle and active while its
 * pool is at the too_many_workers() tipping point would cause too much timer
 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
 * it expire and re-evaluate things from there.
 */
static void idle_worker_timeout(struct timer_list *t)
{
	struct worker_pool *pool = from_timer(pool, t, idle_timer);
	bool do_cull = false;

	/* a cull is already queued, nothing more to do */
	if (work_pending(&pool->idle_cull_work))
		return;

	raw_spin_lock_irq(&pool->lock);

	if (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
		do_cull = !time_before(jiffies, expires);

		if (!do_cull)
			mod_timer(&pool->idle_timer, expires);
	}
	raw_spin_unlock_irq(&pool->lock);

	/*
	 * Culling needs a sleepable context (it changes worker affinity),
	 * so punt the actual work to idle_cull_work.
	 */
	if (do_cull)
		queue_work(system_unbound_wq, &pool->idle_cull_work);
}
/**
 * idle_cull_fn - cull workers that have been idle for too long.
 * @work: the pool's work for handling these idle workers
 *
 * This goes through a pool's idle workers and gets rid of those that have been
 * idle for at least IDLE_WORKER_TIMEOUT seconds.
 *
 * We don't want to disturb isolated CPUs because of a pcpu kworker being
 * culled, so this also resets worker affinity. This requires a sleepable
 * context, hence the split between timer callback and work item.
 */
static void idle_cull_fn(struct work_struct *work)
{
	struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
	LIST_HEAD(cull_list);

	/*
	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
	 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
	 * path. This is required as a previously-preempted worker could run after
	 * set_worker_dying() has happened but before wake_dying_workers() did.
	 */
	mutex_lock(&wq_pool_attach_mutex);
	raw_spin_lock_irq(&pool->lock);

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is LIFO; the tail has been idle the longest */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			/* not stale yet; re-arm the timer for the remainder */
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		set_worker_dying(worker, &cull_list);
	}

	raw_spin_unlock_irq(&pool->lock);
	wake_dying_workers(&cull_list);
	mutex_unlock(&wq_pool_attach_mutex);
}
/*
 * send_mayday - summon the rescuer of @work's workqueue
 * @work: work item stuck on a pool which can't create workers
 *
 * Queue @work's pwq on wq->maydays and wake the rescuer.  Called from
 * pool_mayday_timeout() with pool->lock and wq_mayday_lock both held.
 */
static void send_mayday(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq_mayday_lock);

	/* only WQ_MEM_RECLAIM workqueues have a rescuer */
	if (!wq->rescuer)
		return;

	/* mayday mayday mayday */
	if (list_empty(&pwq->mayday_node)) {
		/*
		 * If @pwq is for an unbound wq, its base ref may be put at
		 * any time due to an attribute change. Pin @pwq until the
		 * rescuer is done with it.
		 */
		get_pwq(pwq);
		list_add_tail(&pwq->mayday_node, &wq->maydays);
		wake_up_process(wq->rescuer->task);
		pwq->stats[PWQ_STAT_MAYDAY]++;
	}
}
/*
 * pool_mayday_timeout - periodic distress check while worker creation stalls
 * @t: the pool's mayday_timer, armed by maybe_create_worker()
 *
 * If @pool still needs a worker, send mayday for every queued work item and
 * re-arm the timer for another MAYDAY_INTERVAL.
 */
static void pool_mayday_timeout(struct timer_list *t)
{
	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
	struct work_struct *work;

	/* nested locking: pool->lock first, then wq_mayday_lock */
	raw_spin_lock_irq(&pool->lock);
	raw_spin_lock(&wq_mayday_lock);		/* for wq->maydays */

	if (need_to_create_worker(pool)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful. We might be hitting an
		 * allocation deadlock. Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &pool->worklist, entry)
			send_mayday(work);
	}

	raw_spin_unlock(&wq_mayday_lock);
	raw_spin_unlock_irq(&pool->lock);

	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
 * maybe_create_worker - create a new worker if necessary
 * @pool: pool to create a new worker for
 *
 * Create a new worker for @pool if necessary. @pool is guaranteed to
 * have at least one idle worker on return from this function. If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @pool to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be %false and
 * may_start_working() %true.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations. Called only from
 * manager.
 */
static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
	raw_spin_unlock_irq(&pool->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		if (create_worker(pool) || !need_to_create_worker(pool))
			break;

		/* creation failed; back off before retrying */
		schedule_timeout_interruptible(CREATE_COOLDOWN);

		if (!need_to_create_worker(pool))
			break;
	}

	del_timer_sync(&pool->mayday_timer);
	raw_spin_lock_irq(&pool->lock);
	/*
	 * This is necessary even after a new worker was just successfully
	 * created as @pool->lock was dropped and the new worker might have
	 * already become busy.
	 */
	if (need_to_create_worker(pool))
		goto restart;
}
/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage the worker pool @worker belongs
 * to. At any given time, there can be only zero or one manager per
 * pool. The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return. On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations.
 *
 * Return:
 * %false if the pool doesn't need management and the caller can safely
 * start processing works, %true if management function was performed and
 * the conditions that the caller verified before calling the function may
 * no longer be true.
 */
static bool manage_workers(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	/* somebody else is managing; flag is checked/set under pool->lock */
	if (pool->flags & POOL_MANAGER_ACTIVE)
		return false;

	pool->flags |= POOL_MANAGER_ACTIVE;
	pool->manager = worker;

	maybe_create_worker(pool);

	pool->manager = NULL;
	pool->flags &= ~POOL_MANAGER_ACTIVE;
	/* wake anyone rcuwait'ing for the manager slot to free up */
	rcuwait_wake_up(&manager_wait);
	return true;
}
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work. This function contains all the logics necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing. As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct worker_pool *pool = worker->pool;
	unsigned long work_data;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too. To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
	/* ensure we're on the correct CPU */
	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
		     raw_smp_processor_id() != pool->cpu);

	/* claim and dequeue */
	debug_work_deactivate(work);
	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
	worker->current_work = work;
	worker->current_func = work->func;
	worker->current_pwq = pwq;
	/* snapshot runtime for CPU-intensive auto-detection in wq_worker_tick() */
	worker->current_at = worker->task->se.sum_exec_runtime;
	work_data = *work_data_bits(work);
	worker->current_color = get_work_color(work_data);

	/*
	 * Record wq name for cmdline and debug reporting, may get
	 * overridden through set_worker_desc().
	 */
	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);

	list_del_init(&work->entry);

	/*
	 * CPU intensive works don't participate in concurrency management.
	 * They're the scheduler's responsibility. This takes @worker out
	 * of concurrency management and the next code block will chain
	 * execution of the pending work items.
	 */
	if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE);

	/*
	 * Kick @pool if necessary. It's always noop for per-cpu worker pools
	 * since nr_running would always be >= 1 at this point. This is used to
	 * chain execution of the pending work items for WORKER_NOT_RUNNING
	 * workers such as the UNBOUND and CPU_INTENSIVE ones.
	 */
	kick_pool(pool);

	/*
	 * Record the last pool and clear PENDING which should be the last
	 * update to @work. Also, do this inside @pool->lock so that
	 * PENDING and queued state changes happen together while IRQ is
	 * disabled.
	 */
	set_work_pool_and_clear_pending(work, pool->id);

	pwq->stats[PWQ_STAT_STARTED]++;
	raw_spin_unlock_irq(&pool->lock);

	lock_map_acquire(&pwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	/*
	 * Strictly speaking we should mark the invariant state without holding
	 * any locks, that is, before these two lock_map_acquire()'s.
	 *
	 * However, that would result in:
	 *
	 *   A(W1)
	 *   WFC(C)
	 *		A(W1)
	 *		C(C)
	 *
	 * Which would create W1->C->W1 dependencies, even though there is no
	 * actual deadlock possible. There are two solutions, using a
	 * read-recursive acquire on the work(queue) 'locks', but this will then
	 * hit the lockdep limitation on recursive locks, or simply discard
	 * these locks.
	 *
	 * AFAICT there is no possible deadlock scenario between the
	 * flush_work() and complete() primitives (except for single-threaded
	 * workqueues), so hiding them isn't a problem.
	 */
	lockdep_invariant_state(true);
	trace_workqueue_execute_start(work);
	/* the actual work function runs here, outside pool->lock */
	worker->current_func(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work, worker->current_func);
	pwq->stats[PWQ_STAT_COMPLETED]++;
	lock_map_release(&lockdep_map);
	lock_map_release(&pwq->wq->lockdep_map);

	/* complain if the work function leaked atomic/lock/RCU context */
	if (unlikely(in_atomic() || lockdep_depth(current) > 0 ||
		     rcu_preempt_depth() > 0)) {
		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n"
		       "     last function: %ps\n",
		       current->comm, preempt_count(), rcu_preempt_depth(),
		       task_pid_nr(current), worker->current_func);
		debug_show_held_locks(current);
		dump_stack();
	}

	/*
	 * The following prevents a kworker from hogging CPU on !PREEMPTION
	 * kernels, where a requeueing work item waiting for something to
	 * happen could deadlock with stop_machine as such work item could
	 * indefinitely requeue itself while all other CPUs are trapped in
	 * stop_machine. At the same time, report a quiescent RCU state so
	 * the same condition doesn't freeze RCU.
	 */
	cond_resched();

	raw_spin_lock_irq(&pool->lock);

	/*
	 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
	 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
	 * wq_cpu_intensive_thresh_us. Clear it.
	 */
	worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* tag the worker for identification in schedule() */
	worker->last_func = worker->current_func;

	/* we're done with it, release */
	hash_del(&worker->hentry);
	worker->current_work = NULL;
	worker->current_func = NULL;
	worker->current_pwq = NULL;
	worker->current_color = INT_MAX;
	pwq_dec_nr_in_flight(pwq, work_data);
}
/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Drain @worker->scheduled one item at a time.  The list can gain new
 * entries while an item is executing, so the head is re-fetched on every
 * iteration rather than iterated over.  The pool's watchdog timestamp is
 * refreshed once, just before the first item runs.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	struct work_struct *work;
	bool first = true;

	for (;;) {
		work = list_first_entry_or_null(&worker->scheduled,
						struct work_struct, entry);
		if (!work)
			break;
		if (first) {
			worker->pool->watchdog_ts = jiffies;
			first = false;
		}
		process_one_work(worker, work);
	}
}
/*
 * Set or clear PF_WQ_WORKER on %current.  Done under wq_pool_attach_mutex
 * so the flag change is serialized against pool attach/detach.
 */
static void set_pf_worker(bool val)
{
	mutex_lock(&wq_pool_attach_mutex);
	if (!val)
		current->flags &= ~PF_WQ_WORKER;
	else
		current->flags |= PF_WQ_WORKER;
	mutex_unlock(&wq_pool_attach_mutex);
}
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The worker thread function.  All workers belong to a worker_pool -
 * either a per-cpu one or dynamic unbound one.  These workers process all
 * work items regardless of their specific target workqueue.  The only
 * exception is work items which belong to workqueues with a rescuer which
 * will be explained in rescuer_thread().
 *
 * Return: 0
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct worker_pool *pool = worker->pool;

	/* tell the scheduler that this is a workqueue worker */
	set_pf_worker(true);
woke_up:
	raw_spin_lock_irq(&pool->lock);

	/* am I supposed to die? */
	if (unlikely(worker->flags & WORKER_DIE)) {
		raw_spin_unlock_irq(&pool->lock);
		set_pf_worker(false);

		/* self-destruct: release ID, detach and free ourselves */
		set_task_comm(worker->task, "kworker/dying");
		ida_free(&pool->worker_ida, worker->id);
		worker_detach_from_pool(worker);
		WARN_ON_ONCE(!list_empty(&worker->entry));
		kfree(worker);
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(pool))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	WARN_ON_ONCE(!list_empty(&worker->scheduled));

	/*
	 * Finish PREP stage. We're guaranteed to have at least one idle
	 * worker or that someone else has already assumed the manager
	 * role. This is where @worker starts participating in concurrency
	 * management if applicable and concurrency management is restored
	 * after being rebound. See rebind_workers() for details.
	 */
	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);

	do {
		struct work_struct *work =
			list_first_entry(&pool->worklist,
					 struct work_struct, entry);

		/* moves @work (and possibly linked works) to ->scheduled */
		if (assign_work(work, worker, NULL))
			process_scheduled_works(worker);
	} while (keep_working(pool));

	worker_set_flags(worker, WORKER_PREP);
sleep:
	/*
	 * pool->lock is held and there's no work to process and no need to
	 * manage, sleep. Workers are woken up only while holding
	 * pool->lock or from local cpu, so setting the current state
	 * before releasing pool->lock is enough to prevent losing any
	 * event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_IDLE);
	raw_spin_unlock_irq(&pool->lock);
	schedule();
	goto woke_up;
}
/**
 * rescuer_thread - the rescuer thread function
 * @__rescuer: self
 *
 * Workqueue rescuer thread function.  There's one rescuer for each
 * workqueue which has WQ_MEM_RECLAIM set.
 *
 * Regular work processing on a pool may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
 * the problem rescuer solves.
 *
 * When such condition is possible, the pool summons rescuers of all
 * workqueues which have works queued on the pool and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 *
 * Return: 0
 */
static int rescuer_thread(void *__rescuer)
{
	struct worker *rescuer = __rescuer;
	struct workqueue_struct *wq = rescuer->rescue_wq;
	bool should_stop;

	set_user_nice(current, RESCUER_NICE_LEVEL);

	/*
	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
	 * doesn't participate in concurrency management.
	 */
	set_pf_worker(true);
repeat:
	set_current_state(TASK_IDLE);

	/*
	 * By the time the rescuer is requested to stop, the workqueue
	 * shouldn't have any work pending, but @wq->maydays may still have
	 * pwq(s) queued.  This can happen by non-rescuer workers consuming
	 * all the work items before the rescuer got to them.  Go through
	 * @wq->maydays processing before acting on should_stop so that the
	 * list is always empty on exit.
	 */
	should_stop = kthread_should_stop();

	/* see whether any pwq is asking for help */
	raw_spin_lock_irq(&wq_mayday_lock);

	while (!list_empty(&wq->maydays)) {
		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
					struct pool_workqueue, mayday_node);
		struct worker_pool *pool = pwq->pool;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		list_del_init(&pwq->mayday_node);

		raw_spin_unlock_irq(&wq_mayday_lock);

		/* migrate to the distressed pool for the duration */
		worker_attach_to_pool(rescuer, pool);

		raw_spin_lock_irq(&pool->lock);

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
			if (get_work_pwq(work) == pwq &&
			    assign_work(work, rescuer, &n))
				pwq->stats[PWQ_STAT_RESCUED]++;
		}

		if (!list_empty(&rescuer->scheduled)) {
			process_scheduled_works(rescuer);

			/*
			 * The above execution of rescued work items could
			 * have created more to rescue through
			 * pwq_activate_first_inactive() or chained
			 * queueing.  Let's put @pwq back on mayday list so
			 * that such back-to-back work items, which may be
			 * being used to relieve memory pressure, don't
			 * incur MAYDAY_INTERVAL delay inbetween.
			 */
			if (pwq->nr_active && need_to_create_worker(pool)) {
				raw_spin_lock(&wq_mayday_lock);
				/*
				 * Queue iff we aren't racing destruction
				 * and somebody else hasn't queued it already.
				 */
				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
					get_pwq(pwq);
					list_add_tail(&pwq->mayday_node, &wq->maydays);
				}
				raw_spin_unlock(&wq_mayday_lock);
			}
		}

		/*
		 * Put the reference grabbed by send_mayday().  @pool won't
		 * go away while we're still attached to it.
		 */
		put_pwq(pwq);

		/*
		 * Leave this pool.  Notify regular workers; otherwise, we end up
		 * with 0 concurrency and stalling the execution.
		 */
		kick_pool(pool);

		raw_spin_unlock_irq(&pool->lock);

		worker_detach_from_pool(rescuer);

		raw_spin_lock_irq(&wq_mayday_lock);
	}

	raw_spin_unlock_irq(&wq_mayday_lock);

	if (should_stop) {
		__set_current_state(TASK_RUNNING);
		set_pf_worker(false);
		return 0;
	}

	/* rescuers should never participate in concurrency management */
	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
	schedule();
	goto repeat;
}
/**
 * check_flush_dependency - check for flush dependency sanity
 * @target_wq: workqueue being flushed
 * @target_work: work item being flushed (NULL for workqueue flushes)
 *
 * %current is trying to flush the whole @target_wq or @target_work on it.
 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
 * reclaiming memory or running on a workqueue which doesn't have
 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
 * a deadlock.
 */
static void check_flush_dependency(struct workqueue_struct *target_wq,
				   struct work_struct *target_work)
{
	work_func_t target_func = target_work ? target_work->func : NULL;
	struct worker *worker;

	/* flushing a WQ_MEM_RECLAIM wq can't block reclaim; nothing to check */
	if (target_wq->flags & WQ_MEM_RECLAIM)
		return;

	/* NULL unless %current is a workqueue worker */
	worker = current_wq_worker();

	WARN_ONCE(current->flags & PF_MEMALLOC,
		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
		  current->pid, current->comm, target_wq->name, target_func);
	/* the leading 'worker &&' keeps the dereferences below safe */
	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
		  worker->current_pwq->wq->name, worker->current_func,
		  target_wq->name, target_func);
}
/*
 * Barrier work item inserted by insert_wq_barrier() to implement work
 * flushing.  @done is completed by wq_barrier_func() when the barrier
 * executes, releasing the flusher waiting on it.
 */
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
	struct task_struct	*task;	/* purely informational */
};
/* Work function of a wq_barrier: wake the flusher waiting on ->done. */
static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *b = container_of(work, struct wq_barrier, work);

	complete(&b->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @pwq: pwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine pwq from @target.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void insert_wq_barrier(struct pool_workqueue *pwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	unsigned int work_flags = 0;
	unsigned int work_color;
	struct list_head *head;

	/*
	 * debugobject calls are safe here even with pool->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));

	init_completion_map(&barr->done, &target->lockdep_map);

	barr->task = current;

	/* The barrier work item does not participate in pwq->nr_active. */
	work_flags |= WORK_STRUCT_INACTIVE;

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker) {
		head = worker->scheduled.next;
		work_color = worker->current_color;
	} else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		work_flags |= *bits & WORK_STRUCT_LINKED;
		work_color = get_work_color(*bits);
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	/* the barrier counts toward @work_color's in-flight total */
	pwq->nr_in_flight[work_color]++;
	work_flags |= work_color_to_flags(work_color);

	insert_work(pwq, &barr->work, head, work_flags);
}
/**
 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare pwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all pwqs should be
 * -1.  If no pwq has in-flight commands at the specified color, all
 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
 * has in flight commands, its pwq->flush_color is set to
 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all pwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->mutex).
 *
 * Return:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	struct pool_workqueue *pwq;

	if (flush_color >= 0) {
		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
		/* start at 1; the matching dec below fires the completion */
		atomic_set(&wq->nr_pwqs_to_flush, 1);
	}

	for_each_pwq(pwq, wq) {
		struct worker_pool *pool = pwq->pool;

		raw_spin_lock_irq(&pool->lock);

		if (flush_color >= 0) {
			WARN_ON_ONCE(pwq->flush_color != -1);

			if (pwq->nr_in_flight[flush_color]) {
				/* this pwq has work to flush; arm its wakeup */
				pwq->flush_color = flush_color;
				atomic_inc(&wq->nr_pwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
			pwq->work_color = work_color;
		}

		raw_spin_unlock_irq(&pool->lock);
	}

	/* drop the initial ref; completes immediately if nothing was armed */
	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * __flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * This function sleeps until all work items which were queued on entry
 * have finished execution, but it is not livelocked by new incoming ones.
 */
void __flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
	};
	int next_color;

	if (WARN_ON(!wq_online))
		return;

	/* acquire/release pair teaches lockdep about wq flush dependencies */
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	check_flush_dependency(wq, NULL);

	mutex_unlock(&wq->mutex);

	trace_android_vh_flush_wq_wait_start(wq);
	wait_for_completion(&this_flusher.done);
	trace_android_vh_flush_wq_wait_finish(wq);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (READ_ONCE(wq->first_flusher) != &this_flusher)
		return;

	mutex_lock(&wq->mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	WRITE_ONCE(wq->first_flusher, NULL);

	WARN_ON_ONCE(!list_empty(&this_flusher.list));
	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
			     wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			WARN_ON_ONCE(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm pwqs.
		 */
		WARN_ON_ONCE(wq->flush_color == wq->work_color);
		WARN_ON_ONCE(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL(__flush_workqueue);
/**
 * drain_workqueue - drain a workqueue
 * @wq: workqueue to drain
 *
 * Wait until the workqueue becomes empty.  While draining is in progress,
 * only chain queueing is allowed.  IOW, only currently pending or running
 * work items on @wq can queue further work items on it.  @wq is flushed
 * repeatedly until it becomes empty.  The number of flushing is determined
 * by the depth of chaining and should be relatively short.  Whine if it
 * takes too long.
 */
void drain_workqueue(struct workqueue_struct *wq)
{
	unsigned int flush_cnt = 0;
	struct pool_workqueue *pwq;

	/*
	 * __queue_work() needs to test whether there are drainers, is much
	 * hotter than drain_workqueue() and already looks at @wq->flags.
	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
	 */
	mutex_lock(&wq->mutex);
	if (!wq->nr_drainers++)
		wq->flags |= __WQ_DRAINING;
	mutex_unlock(&wq->mutex);
reflush:
	__flush_workqueue(wq);

	mutex_lock(&wq->mutex);

	for_each_pwq(pwq, wq) {
		bool drained;

		raw_spin_lock_irq(&pwq->pool->lock);
		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
		raw_spin_unlock_irq(&pwq->pool->lock);

		if (drained)
			continue;

		/* warn at the 10th try, then every 100th up to 1000 */
		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
				wq->name, __func__, flush_cnt);

		mutex_unlock(&wq->mutex);
		goto reflush;
	}

	if (!--wq->nr_drainers)
		wq->flags &= ~__WQ_DRAINING;
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
/*
 * start_flush_work - prepare to wait for a work item to finish
 * @work: work item to flush
 * @barr: on-stack barrier to insert after @work
 * @from_cancel: %true if called from the cancel path
 *
 * If @work is pending or executing somewhere, queue @barr on the same
 * pool_workqueue so that completing @barr implies @work has finished.
 *
 * Return: %true if @barr was inserted and the caller should wait on
 * @barr->done, %false if @work is already idle.
 */
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
			     bool from_cancel)
{
	struct worker *worker = NULL;
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	might_sleep();

	rcu_read_lock();
	pool = get_work_pool(work);
	if (!pool) {
		/* never queued or pool already gone - nothing to wait for */
		rcu_read_unlock();
		return false;
	}

	raw_spin_lock_irq(&pool->lock);
	/* see the comment in try_to_grab_pending() with the same code */
	pwq = get_work_pwq(work);
	if (pwq) {
		/* @work is still queued; bail if it moved to another pool */
		if (unlikely(pwq->pool != pool))
			goto already_gone;
	} else {
		/* not queued - maybe a worker on this pool is executing it */
		worker = find_worker_executing_work(pool, work);
		if (!worker)
			goto already_gone;
		pwq = worker->current_pwq;
	}

	check_flush_dependency(pwq->wq, work);

	insert_wq_barrier(pwq, barr, work, worker);
	raw_spin_unlock_irq(&pool->lock);

	/*
	 * Force a lock recursion deadlock when using flush_work() inside a
	 * single-threaded or rescuer equipped workqueue.
	 *
	 * For single threaded workqueues the deadlock happens when the work
	 * is after the work issuing the flush_work(). For rescuer equipped
	 * workqueues the deadlock happens when the rescuer stalls, blocking
	 * forward progress.
	 */
	if (!from_cancel &&
	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
		lock_map_acquire(&pwq->wq->lockdep_map);
		lock_map_release(&pwq->wq->lockdep_map);
	}
	rcu_read_unlock();
	return true;
already_gone:
	raw_spin_unlock_irq(&pool->lock);
	rcu_read_unlock();
	return false;
}
/*
 * Common implementation of flush_work() and the cancel-and-wait path.
 * Returns %true if we actually waited for @work to finish, %false if
 * it was already idle.
 */
static bool __flush_work(struct work_struct *work, bool from_cancel)
{
	struct wq_barrier barr;

	/* can't flush before the workqueue subsystem is up or on a dead work */
	if (WARN_ON(!wq_online) || WARN_ON(!work->func))
		return false;

	/* let lockdep know we're about to wait on @work */
	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	if (!start_flush_work(work, &barr, from_cancel))
		return false;

	trace_android_vh_flush_work_wait_start(work);
	wait_for_completion(&barr.done);
	trace_android_vh_flush_work_wait_finish(work);
	destroy_work_on_stack(&barr.work);
	return true;
}
/**
 * flush_work - wait for a work to finish executing the last queueing instance
 * @work: the work to flush
 *
 * Block until the last queueing instance of @work has finished executing.
 * On return, @work is guaranteed to be idle unless it has been requeued
 * after the flush started.
 *
 * Return:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work(struct work_struct *work)
{
	return __flush_work(work, false);
}
EXPORT_SYMBOL_GPL(flush_work);
/* wait entry used by __cancel_work_timer() to wait for a concurrent canceler */
struct cwt_wait {
	wait_queue_entry_t	wait;	/* embedded waitqueue entry */
	struct work_struct	*work;	/* the work item being canceled */
};

static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);

	/* only wake waiters whose work item matches the one passed as @key */
	return cwait->work == key ?
		autoremove_wake_function(wait, mode, sync, key) : 0;
}
/*
 * Cancel @work (and its timer when @is_dwork) and wait for any in-flight
 * execution to finish.  Returns %true if @work was pending, %false
 * otherwise.  Common backend for cancel_work_sync() and
 * cancel_delayed_work_sync().
 */
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
		/*
		 * If someone else is already canceling, wait for it to
		 * finish.  flush_work() doesn't work for PREEMPT_NONE
		 * because we may get scheduled between @work's completion
		 * and the other canceling task resuming and clearing
		 * CANCELING - flush_work() will return false immediately
		 * as @work is no longer busy, try_to_grab_pending() will
		 * return -ENOENT as @work is still being canceled and the
		 * other canceling task won't be able to clear CANCELING as
		 * we're hogging the CPU.
		 *
		 * Let's wait for completion using a waitqueue.  As this
		 * may lead to the thundering herd problem, use a custom
		 * wake function which matches @work along with exclusive
		 * wait and wakeup.
		 */
		if (unlikely(ret == -ENOENT)) {
			struct cwt_wait cwait;

			init_wait(&cwait.wait);
			cwait.wait.func = cwt_wakefn;
			cwait.work = work;

			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
						  TASK_UNINTERRUPTIBLE);
			/* recheck under the waitqueue to avoid a lost wakeup */
			if (work_is_canceling(work))
				schedule();
			finish_wait(&cancel_waitq, &cwait.wait);
		}
	} while (unlikely(ret < 0));

	/* tell other tasks trying to grab @work to back off */
	mark_work_canceling(work);
	local_irq_restore(flags);

	/*
	 * This allows canceling during early boot.  We know that @work
	 * isn't executing.
	 */
	if (wq_online)
		__flush_work(work, true);

	/* drop CANCELING along with the rest of the work data */
	clear_work_data(work);

	/*
	 * Paired with prepare_to_wait() above so that either
	 * waitqueue_active() is visible here or !work_is_canceling() is
	 * visible there.
	 */
	smp_mb();
	if (waitqueue_active(&cancel_waitq))
		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);

	return ret;
}
/**
 * cancel_work_sync - cancel a work and wait for it to finish
 * @work: the work to cancel
 *
 * Cancel @work and wait until its execution has finished.  Works even if
 * @work requeues itself or migrates to another workqueue; on return it is
 * guaranteed to be neither pending nor executing on any CPU.
 *
 * Must not be used on the work member of a delayed_work - use
 * cancel_delayed_work_sync() for those.
 *
 * The caller has to ensure the workqueue @work was last queued on stays
 * alive until this function returns.
 *
 * Return:
 * %true if @work was pending, %false otherwise.
 */
bool cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, false);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * Return:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work(struct delayed_work *dwork)
{
	/* irqs off so the timer can't rearm between the del and the queue */
	local_irq_disable();
	if (del_timer_sync(&dwork->timer))
		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
	local_irq_enable();
	return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * flush_rcu_work - wait for a rwork to finish executing the last queueing
 * @rwork: the rcu work to flush
 *
 * If @rwork is still waiting for its RCU grace period, wait for the grace
 * period first so the work gets queued, then flush it.
 *
 * Return:
 * %true if flush_rcu_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_rcu_work(struct rcu_work *rwork)
{
	if (!test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work)))
		return flush_work(&rwork->work);

	/* still in the RCU callback stage - wait it out, then flush */
	rcu_barrier();
	flush_work(&rwork->work);
	return true;
}
EXPORT_SYMBOL(flush_rcu_work);
/*
 * Cancel a pending (delayed) work item without waiting for an in-flight
 * execution to finish.  Returns %true if @work was pending and got
 * canceled, %false otherwise.
 */
static bool __cancel_work(struct work_struct *work, bool is_dwork)
{
	unsigned long flags;
	int ret;

	/* -EAGAIN is transient - keep retrying until the grab is decisive */
	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
	} while (unlikely(ret == -EAGAIN));

	/* -ENOENT: another task is canceling it; nothing for us to do */
	if (unlikely(ret < 0))
		return false;

	/* clear PENDING but keep the last pool association for flushers */
	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
	local_irq_restore(flags);
	return ret;
}
/*
 * cancel_work - cancel a pending work item without waiting
 * @work: the work to cancel
 *
 * Non-delayed counterpart of cancel_delayed_work(); see there for the
 * detailed semantics.
 */
bool cancel_work(struct work_struct *work)
{
	return __cancel_work(work, false);
}
EXPORT_SYMBOL(cancel_work);
/**
 * cancel_delayed_work - cancel a delayed work
 * @dwork: delayed_work to cancel
 *
 * Kill off a pending delayed_work.
 *
 * Return: %true if @dwork was pending and canceled; %false if it wasn't
 * pending.
 *
 * Note:
 * The callback may still be running when this returns unless the return
 * value is %true and the work doesn't re-arm itself.  Flush explicitly or
 * use cancel_delayed_work_sync() if you need it to have finished.
 *
 * This function is safe to call from any context including IRQ handler.
 */
bool cancel_delayed_work(struct delayed_work *dwork)
{
	return __cancel_work(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work);
/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work cancel
 *
 * Delayed-work counterpart of cancel_work_sync(): cancels @dwork,
 * including its timer, and waits for any in-flight execution to finish.
 *
 * Return:
 * %true if @dwork was pending, %false otherwise.
 */
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * Runs @func once on every online CPU via the system workqueue and blocks
 * until all invocations have completed.  This is very slow - use sparingly.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int schedule_on_each_cpu(work_func_t func)
{
	struct work_struct __percpu *per_cpu_work;
	int cpu;

	per_cpu_work = alloc_percpu(struct work_struct);
	if (!per_cpu_work)
		return -ENOMEM;

	cpus_read_lock();

	/* fan out: queue one work item on each online CPU */
	for_each_online_cpu(cpu) {
		struct work_struct *w = per_cpu_ptr(per_cpu_work, cpu);

		INIT_WORK(w, func);
		schedule_work_on(cpu, w);
	}

	/* fan in: wait for every queued item to finish */
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(per_cpu_work, cpu));

	cpus_read_unlock();
	free_percpu(per_cpu_work);
	return 0;
}
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *      be available when the work executes)
 *
 * Runs @fn immediately when called from process context; otherwise
 * defers it to the system workqueue.
 *
 * Return: 0 - function was executed
 *         1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	/* can't call @fn directly from interrupt context - defer it */
	if (in_interrupt()) {
		INIT_WORK(&ew->work, fn);
		schedule_work(&ew->work);
		return 1;
	}

	fn(&ew->work);
	return 0;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
/**
 * free_workqueue_attrs - free a workqueue_attrs
 * @attrs: workqueue_attrs to free
 *
 * Undo alloc_workqueue_attrs().  %NULL is a no-op.
 */
void free_workqueue_attrs(struct workqueue_attrs *attrs)
{
	if (!attrs)
		return;

	free_cpumask_var(attrs->cpumask);
	free_cpumask_var(attrs->__pod_cpumask);
	kfree(attrs);
}
EXPORT_SYMBOL_GPL(free_workqueue_attrs);
/**
 * alloc_workqueue_attrs - allocate a workqueue_attrs
 *
 * Allocate a new workqueue_attrs, initialize with default settings and
 * return it.
 *
 * Return: The allocated new workqueue_attr on success. %NULL on failure.
 */
struct workqueue_attrs *alloc_workqueue_attrs(void)
{
	struct workqueue_attrs *attrs;

	/*
	 * Short-circuit evaluation: the cpumask allocations only run when
	 * the preceding allocations succeeded.  On any failure,
	 * free_workqueue_attrs() handles both %NULL and the zeroed,
	 * partially-initialized cases.
	 */
	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (attrs &&
	    alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL) &&
	    alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) {
		cpumask_copy(attrs->cpumask, cpu_possible_mask);
		attrs->affn_scope = WQ_AFFN_DFL;
		return attrs;
	}

	free_workqueue_attrs(attrs);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_workqueue_attrs);
/* copy every attrs field from @from to @to */
static void copy_workqueue_attrs(struct workqueue_attrs *to,
				 const struct workqueue_attrs *from)
{
	/* fields that participate in hashing and equality */
	to->nice = from->nice;
	to->affn_strict = from->affn_strict;
	cpumask_copy(to->cpumask, from->cpumask);
	cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);

	/*
	 * Unlike hash and equality test, copying shouldn't ignore wq-only
	 * fields as copying is used for both pool and wq attrs. Instead,
	 * get_unbound_pool() explicitly clears the fields.
	 */
	to->affn_scope = from->affn_scope;
	to->ordered = from->ordered;
}
/*
 * Some attrs fields are workqueue-only.  Clear them for worker_pool's.
 * See the comments in 'struct workqueue_attrs' definition.
 */
static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
{
	attrs->ordered = false;
	/* WQ_AFFN_NR_TYPES marks the scope as "not a wq attr" */
	attrs->affn_scope = WQ_AFFN_NR_TYPES;
}
/*
 * Hash of @attrs' content.  Only covers the fields that wqattrs_equal()
 * compares; wq-only fields (affn_scope, ordered) are intentionally left out.
 */
static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
{
	u32 hash;

	hash = jhash_1word(attrs->nice, 0);
	hash = jhash(cpumask_bits(attrs->cpumask),
		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
	hash = jhash(cpumask_bits(attrs->__pod_cpumask),
		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
	return jhash_1word(attrs->affn_strict, hash);
}
/* content equality test over the same fields wqattrs_hash() covers */
static bool wqattrs_equal(const struct workqueue_attrs *a,
			  const struct workqueue_attrs *b)
{
	return a->nice == b->nice &&
	       cpumask_equal(a->cpumask, b->cpumask) &&
	       cpumask_equal(a->__pod_cpumask, b->__pod_cpumask) &&
	       a->affn_strict == b->affn_strict;
}
/* Update @attrs with actually available CPUs */
static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
				      const cpumask_t *unbound_cpumask)
{
	/*
	 * Restrict @attrs->cpumask to @unbound_cpumask.  cpumask_and()
	 * returns %false when the resulting mask is empty, in which case we
	 * fall back to @unbound_cpumask wholesale.
	 */
	if (unlikely(!cpumask_and(attrs->cpumask, attrs->cpumask,
				  unbound_cpumask)))
		cpumask_copy(attrs->cpumask, unbound_cpumask);
}
/*
 * Find the wq_pod_type to use for @attrs.  Resolves WQ_AFFN_DFL to the
 * system-wide default scope and falls back to WQ_AFFN_SYSTEM when the
 * chosen scope hasn't been initialized yet.
 */
static const struct wq_pod_type *
wqattrs_pod_type(const struct workqueue_attrs *attrs)
{
	enum wq_affn_scope scope;
	struct wq_pod_type *pt;

	/* to synchronize access to wq_affn_dfl */
	lockdep_assert_held(&wq_pool_mutex);

	if (attrs->affn_scope == WQ_AFFN_DFL)
		scope = wq_affn_dfl;
	else
		scope = attrs->affn_scope;

	pt = &wq_pod_types[scope];

	/*
	 * NR_TYPES means @attrs are pool attrs which shouldn't reach here;
	 * warn once and fall through to the SYSTEM fallback below.
	 */
	if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
	    likely(pt->nr_pods))
		return pt;

	/*
	 * Before workqueue_init_topology(), only SYSTEM is available which is
	 * initialized in workqueue_init_early().
	 */
	pt = &wq_pod_types[WQ_AFFN_SYSTEM];
	BUG_ON(!pt->nr_pods);
	return pt;
}
/**
 * init_worker_pool - initialize a newly zalloc'd worker_pool
 * @pool: worker_pool to initialize
 *
 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
 *
 * Return: 0 on success, -errno on failure.  Even on failure, all fields
 * inside @pool proper are initialized and put_unbound_pool() can be called
 * on @pool safely to release it.
 */
static int init_worker_pool(struct worker_pool *pool)
{
	raw_spin_lock_init(&pool->lock);
	pool->id = -1;
	pool->cpu = -1;
	pool->node = NUMA_NO_NODE;
	/* new pools start detached from any CPU */
	pool->flags |= POOL_DISASSOCIATED;
	pool->watchdog_ts = jiffies;
	INIT_LIST_HEAD(&pool->worklist);
	INIT_LIST_HEAD(&pool->idle_list);
	hash_init(pool->busy_hash);

	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
	INIT_WORK(&pool->idle_cull_work, idle_cull_fn);

	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);

	INIT_LIST_HEAD(&pool->workers);
	INIT_LIST_HEAD(&pool->dying_workers);

	ida_init(&pool->worker_ida);
	INIT_HLIST_NODE(&pool->hash_node);
	pool->refcnt = 1;

	/* shouldn't fail above this point */
	pool->attrs = alloc_workqueue_attrs();
	if (!pool->attrs)
		return -ENOMEM;

	/* pool attrs never carry wq-only fields */
	wqattrs_clear_for_pool(pool->attrs);

	return 0;
}
#ifdef CONFIG_LOCKDEP
static void wq_init_lockdep(struct workqueue_struct *wq)
{
char *lock_name;
lockdep_register_key(&wq->key);
lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
if (!lock_name)
lock_name = wq->name;
wq->lock_name = lock_name;
lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
}
static void wq_unregister_lockdep(struct workqueue_struct *wq)
{
lockdep_unregister_key(&wq->key);
}
static void wq_free_lockdep(struct workqueue_struct *wq)
{
if (wq->lock_name != wq->name)
kfree(wq->lock_name);
}
#else
static void wq_init_lockdep(struct workqueue_struct *wq)
{
}
static void wq_unregister_lockdep(struct workqueue_struct *wq)
{
}
static void wq_free_lockdep(struct workqueue_struct *wq)
{
}
#endif
/* RCU callback: release everything a workqueue_struct owns, then itself */
static void rcu_free_wq(struct rcu_head *rcu)
{
	struct workqueue_struct *wq = container_of(rcu, struct workqueue_struct, rcu);

	wq_free_lockdep(wq);
	free_percpu(wq->cpu_pwq);
	free_workqueue_attrs(wq->unbound_attrs);
	kfree(wq);
}
/* RCU callback: final teardown of a worker_pool after the grace period */
static void rcu_free_pool(struct rcu_head *rcu)
{
	struct worker_pool *p = container_of(rcu, struct worker_pool, rcu);

	ida_destroy(&p->worker_ida);
	free_workqueue_attrs(p->attrs);
	kfree(p);
}
/**
 * put_unbound_pool - put a worker_pool
 * @pool: worker_pool to put
 *
 * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
 * safe manner.  get_unbound_pool() calls this function on its failure path
 * and this function should be able to release pools which went through,
 * successfully or not, init_worker_pool().
 *
 * Should be called with wq_pool_mutex held.
 */
static void put_unbound_pool(struct worker_pool *pool)
{
	DECLARE_COMPLETION_ONSTACK(detach_completion);
	struct worker *worker;
	LIST_HEAD(cull_list);

	lockdep_assert_held(&wq_pool_mutex);

	if (--pool->refcnt)
		return;

	/* sanity checks */
	if (WARN_ON(!(pool->cpu < 0)) ||
	    WARN_ON(!list_empty(&pool->worklist)))
		return;

	/* release id and unhash */
	if (pool->id >= 0)
		idr_remove(&worker_pool_idr, pool->id);
	hash_del(&pool->hash_node);

	/*
	 * Become the manager and destroy all workers.  This prevents
	 * @pool's workers from blocking on attach_mutex.  We're the last
	 * manager and @pool gets freed with the flag set.
	 *
	 * Having a concurrent manager is quite unlikely to happen as we can
	 * only get here with
	 *   pwq->refcnt == pool->refcnt == 0
	 * which implies no work queued to the pool, which implies no worker can
	 * become the manager. However a worker could have taken the role of
	 * manager before the refcnts dropped to 0, since maybe_create_worker()
	 * drops pool->lock
	 */
	while (true) {
		rcuwait_wait_event(&manager_wait,
				   !(pool->flags & POOL_MANAGER_ACTIVE),
				   TASK_UNINTERRUPTIBLE);

		mutex_lock(&wq_pool_attach_mutex);
		raw_spin_lock_irq(&pool->lock);
		/* recheck under the lock; exits the loop holding both locks */
		if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
			pool->flags |= POOL_MANAGER_ACTIVE;
			break;
		}
		raw_spin_unlock_irq(&pool->lock);
		mutex_unlock(&wq_pool_attach_mutex);
	}

	/* mark all idle workers dying; busy ones were excluded by the checks */
	while ((worker = first_idle_worker(pool)))
		set_worker_dying(worker, &cull_list);
	WARN_ON(pool->nr_workers || pool->nr_idle);
	raw_spin_unlock_irq(&pool->lock);

	wake_dying_workers(&cull_list);

	/* wait for the dying workers to actually detach from @pool */
	if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
		pool->detach_completion = &detach_completion;
	mutex_unlock(&wq_pool_attach_mutex);

	if (pool->detach_completion)
		wait_for_completion(pool->detach_completion);

	/* shut down the timers */
	del_timer_sync(&pool->idle_timer);
	cancel_work_sync(&pool->idle_cull_work);
	del_timer_sync(&pool->mayday_timer);

	/* RCU protected to allow dereferences from get_work_pool() */
	call_rcu(&pool->rcu, rcu_free_pool);
}
/**
 * get_unbound_pool - get a worker_pool with the specified attributes
 * @attrs: the attributes of the worker_pool to get
 *
 * Obtain a worker_pool which has the same attributes as @attrs, bump the
 * reference count and return it.  If there already is a matching
 * worker_pool, it will be used; otherwise, this function attempts to
 * create a new one.
 *
 * Should be called with wq_pool_mutex held.
 *
 * Return: On success, a worker_pool with the same attributes as @attrs.
 * On failure, %NULL.
 */
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
	u32 hash = wqattrs_hash(attrs);
	struct worker_pool *pool;
	int pod, node = NUMA_NO_NODE;

	lockdep_assert_held(&wq_pool_mutex);

	/* do we already have a matching pool? */
	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
		if (wqattrs_equal(pool->attrs, attrs)) {
			pool->refcnt++;
			return pool;
		}
	}

	/* If __pod_cpumask is contained inside a NUMA pod, that's our node */
	for (pod = 0; pod < pt->nr_pods; pod++) {
		if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
			node = pt->pod_node[pod];
			break;
		}
	}

	/* nope, create a new one */
	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
	if (!pool || init_worker_pool(pool) < 0)
		goto fail;

	pool->node = node;
	copy_workqueue_attrs(pool->attrs, attrs);
	/* pool attrs must not carry the wq-only fields copied above */
	wqattrs_clear_for_pool(pool->attrs);

	if (worker_pool_assign_id(pool) < 0)
		goto fail;

	/* create and start the initial worker */
	if (wq_online && !create_worker(pool))
		goto fail;

	/* install */
	hash_add(unbound_pool_hash, &pool->hash_node, hash);

	return pool;
fail:
	/* put_unbound_pool() handles partially-initialized pools */
	if (pool)
		put_unbound_pool(pool);
	return NULL;
}
/* RCU callback: return a pool_workqueue to its slab cache */
static void rcu_free_pwq(struct rcu_head *rcu)
{
	struct pool_workqueue *pwq =
		container_of(rcu, struct pool_workqueue, rcu);

	kmem_cache_free(pwq_cache, pwq);
}
/*
 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
 * refcnt and needs to be destroyed.
 */
static void pwq_release_workfn(struct kthread_work *work)
{
	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
						  release_work);
	struct workqueue_struct *wq = pwq->wq;
	struct worker_pool *pool = pwq->pool;
	bool is_last = false;

	/*
	 * When @pwq is not linked, it doesn't hold any reference to the
	 * @wq, and @wq is invalid to access.
	 */
	if (!list_empty(&pwq->pwqs_node)) {
		mutex_lock(&wq->mutex);
		list_del_rcu(&pwq->pwqs_node);
		is_last = list_empty(&wq->pwqs);
		mutex_unlock(&wq->mutex);
	}

	/* unbound pwqs hold a pool reference taken by alloc_unbound_pwq() */
	if (wq->flags & WQ_UNBOUND) {
		mutex_lock(&wq_pool_mutex);
		put_unbound_pool(pool);
		mutex_unlock(&wq_pool_mutex);
	}

	/* readers may still hold RCU references; defer the actual free */
	call_rcu(&pwq->rcu, rcu_free_pwq);

	/*
	 * If we're the last pwq going away, @wq is already dead and no one
	 * is gonna access it anymore.  Schedule RCU free.
	 */
	if (is_last) {
		wq_unregister_lockdep(wq);
		call_rcu(&wq->rcu, rcu_free_wq);
	}
}
/**
 * pwq_adjust_max_active - update a pwq's max_active to the current setting
 * @pwq: target pool_workqueue
 *
 * If @pwq isn't freezing, set @pwq->max_active to the associated
 * workqueue's saved_max_active and activate inactive work items
 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
 */
static void pwq_adjust_max_active(struct pool_workqueue *pwq)
{
	struct workqueue_struct *wq = pwq->wq;
	bool freezable = wq->flags & WQ_FREEZABLE;
	unsigned long flags;

	/* for @wq->saved_max_active */
	lockdep_assert_held(&wq->mutex);

	/* fast exit for non-freezable wqs */
	if (!freezable && pwq->max_active == wq->saved_max_active)
		return;

	/* this function can be called during early boot w/ irq disabled */
	raw_spin_lock_irqsave(&pwq->pool->lock, flags);

	/*
	 * During [un]freezing, the caller is responsible for ensuring that
	 * this function is called at least once after @workqueue_freezing
	 * is updated and visible.
	 */
	if (!freezable || !workqueue_freezing) {
		pwq->max_active = wq->saved_max_active;

		/* activate queued-up work items now that there's headroom */
		while (!list_empty(&pwq->inactive_works) &&
		       pwq->nr_active < pwq->max_active)
			pwq_activate_first_inactive(pwq);

		/* make sure a worker picks up the newly activated items */
		kick_pool(pwq->pool);
	} else {
		/* frozen: block all further activation */
		pwq->max_active = 0;
	}

	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
}
/* initialize newly allocated @pwq which is associated with @wq and @pool */
static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
		     struct worker_pool *pool)
{
	/* pwq pointers are stored with flag bits; must be suitably aligned */
	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);

	memset(pwq, 0, sizeof(*pwq));

	pwq->pool = pool;
	pwq->wq = wq;
	pwq->refcnt = 1;
	pwq->flush_color = -1;		/* not participating in a flush */
	INIT_LIST_HEAD(&pwq->pwqs_node);
	INIT_LIST_HEAD(&pwq->mayday_node);
	INIT_LIST_HEAD(&pwq->inactive_works);
	kthread_init_work(&pwq->release_work, pwq_release_workfn);
}
/* sync @pwq with the current state of its associated wq and link it */
static void link_pwq(struct pool_workqueue *pwq)
{
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq->mutex);

	/* may be called multiple times, ignore if already linked */
	if (!list_empty(&pwq->pwqs_node))
		return;

	/* set the matching work_color */
	pwq->work_color = wq->work_color;

	/* sync max_active to the current setting */
	pwq_adjust_max_active(pwq);

	/* link in @pwq; must come last so readers see a fully synced pwq */
	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
}
/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
					const struct workqueue_attrs *attrs)
{
	struct pool_workqueue *pwq;
	struct worker_pool *pool;

	lockdep_assert_held(&wq_pool_mutex);

	pool = get_unbound_pool(attrs);
	if (!pool)
		return NULL;

	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
	if (!pwq) {
		/* drop the pool reference taken by get_unbound_pool() */
		put_unbound_pool(pool);
		return NULL;
	}

	init_pwq(pwq, wq, pool);
	return pwq;
}
/**
 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
 * @attrs: the wq_attrs of the default pwq of the target workqueue
 * @cpu: the target CPU
 * @cpu_going_down: if >= 0, the CPU to consider as offline
 *
 * Calculate the cpumask a workqueue with @attrs should use on @pod.  If
 * @cpu_going_down is >= 0, that cpu is considered offline during calculation.
 * The result is stored in @attrs->__pod_cpumask.
 *
 * If pod affinity is not enabled, @attrs->cpumask is always used.  If enabled
 * and @pod has online CPUs requested by @attrs, the returned cpumask is the
 * intersection of the possible CPUs of @pod and @attrs->cpumask.
 *
 * The caller is responsible for ensuring that the cpumask of @pod stays stable.
 */
static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
				int cpu_going_down)
{
	const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
	int pod = pt->cpu_pod[cpu];

	/* does @pod have any online CPUs @attrs wants? */
	cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
	cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
	if (cpu_going_down >= 0)
		cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);

	/* no online CPU left in @pod - fall back to the full @attrs->cpumask */
	if (cpumask_empty(attrs->__pod_cpumask)) {
		cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
		return;
	}

	/* yeap, return possible CPUs in @pod that @attrs wants */
	cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);

	if (cpumask_empty(attrs->__pod_cpumask))
		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
				"possible intersect\n");
}
/*
 * Install @pwq into @wq's cpu_pwq for @cpu and return the old pwq.  The
 * caller owns the returned reference and must drop it with
 * put_pwq_unlocked() once it's safe to do so.
 */
static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
					int cpu, struct pool_workqueue *pwq)
{
	struct pool_workqueue *old_pwq;

	lockdep_assert_held(&wq_pool_mutex);
	lockdep_assert_held(&wq->mutex);

	/* link_pwq() can handle duplicate calls */
	link_pwq(pwq);

	/* publish the new pwq; RCU readers see either old or new, never junk */
	old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
	rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
	return old_pwq;
}
/* context to store the prepared attrs & pwqs before applying */
struct apply_wqattrs_ctx {
	struct workqueue_struct	*wq;		/* target workqueue */
	struct workqueue_attrs	*attrs;		/* attrs to apply */
	struct list_head	list;		/* queued for batching commit */
	struct pool_workqueue	*dfl_pwq;	/* fallback pwq covering all CPUs */
	struct pool_workqueue	*pwq_tbl[];	/* per-CPU pwqs, nr_cpu_ids entries */
};
/*
 * Free the resources after success or abort.  After commit, @ctx holds the
 * *old* pwqs, so this also serves as the release path for replaced pwqs.
 */
static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
{
	int cpu;

	if (!ctx)
		return;

	for_each_possible_cpu(cpu)
		put_pwq_unlocked(ctx->pwq_tbl[cpu]);
	put_pwq_unlocked(ctx->dfl_pwq);

	free_workqueue_attrs(ctx->attrs);

	kfree(ctx);
}
/*
 * Allocate the attrs and pwqs for later installation.
 *
 * Returns a fully-populated apply_wqattrs_ctx on success or an ERR_PTR on
 * failure.  Pure allocation - nothing is installed, so a failure here
 * leaves @wq untouched.
 */
static struct apply_wqattrs_ctx *
apply_wqattrs_prepare(struct workqueue_struct *wq,
		      const struct workqueue_attrs *attrs,
		      const cpumask_var_t unbound_cpumask)
{
	struct apply_wqattrs_ctx *ctx;
	struct workqueue_attrs *new_attrs;
	int cpu;

	lockdep_assert_held(&wq_pool_mutex);

	if (WARN_ON(attrs->affn_scope < 0 ||
		    attrs->affn_scope >= WQ_AFFN_NR_TYPES))
		return ERR_PTR(-EINVAL);

	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);

	new_attrs = alloc_workqueue_attrs();
	if (!ctx || !new_attrs)
		goto out_free;

	/*
	 * If something goes wrong during CPU up/down, we'll fall back to
	 * the default pwq covering whole @attrs->cpumask.  Always create
	 * it even if we don't use it immediately.
	 */
	copy_workqueue_attrs(new_attrs, attrs);
	wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
	if (!ctx->dfl_pwq)
		goto out_free;

	for_each_possible_cpu(cpu) {
		if (new_attrs->ordered) {
			/* ordered wqs share the single dfl_pwq on every CPU */
			ctx->dfl_pwq->refcnt++;
			ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
		} else {
			wq_calc_pod_cpumask(new_attrs, cpu, -1);
			ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
			if (!ctx->pwq_tbl[cpu])
				goto out_free;
		}
	}

	/* save the user configured attrs and sanitize it. */
	copy_workqueue_attrs(new_attrs, attrs);
	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
	ctx->attrs = new_attrs;

	ctx->wq = wq;
	return ctx;

out_free:
	free_workqueue_attrs(new_attrs);
	apply_wqattrs_cleanup(ctx);
	return ERR_PTR(-ENOMEM);
}
/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
{
	int cpu;

	/* all pwqs have been created successfully, let's install'em */
	mutex_lock(&ctx->wq->mutex);

	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);

	/* save the previous pwq and install the new one */
	for_each_possible_cpu(cpu)
		ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
							ctx->pwq_tbl[cpu]);

	/* @dfl_pwq might not have been used, ensure it's linked */
	link_pwq(ctx->dfl_pwq);
	/* after the swap, ctx->dfl_pwq holds the old default for cleanup */
	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);

	mutex_unlock(&ctx->wq->mutex);
}
/* acquire the locks needed to apply wq attrs: cpus_read_lock + wq_pool_mutex */
static void apply_wqattrs_lock(void)
{
	/* CPUs should stay stable across pwq creations and installations */
	cpus_read_lock();
	mutex_lock(&wq_pool_mutex);
}
/* release the locks taken by apply_wqattrs_lock() in reverse order */
static void apply_wqattrs_unlock(void)
{
	mutex_unlock(&wq_pool_mutex);
	cpus_read_unlock();
}
/*
 * Apply @attrs to unbound workqueue @wq.  Caller must hold wq_pool_mutex
 * and CPU hotplug read lock.  Returns 0 on success, -errno on failure.
 */
static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
					const struct workqueue_attrs *attrs)
{
	struct apply_wqattrs_ctx *ctx;

	/* only unbound workqueues can change attributes */
	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
		return -EINVAL;

	/* creating multiple pwqs breaks ordering guarantee */
	if (!list_empty(&wq->pwqs)) {
		/* explicitly ordered wqs must keep their guarantee - refuse */
		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
			return -EINVAL;

		/* implicitly ordered wqs silently lose ordering instead */
		wq->flags &= ~__WQ_ORDERED;
	}

	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* the ctx has been prepared successfully, let's commit it */
	apply_wqattrs_commit(ctx);
	/* cleanup releases the old pwqs that commit swapped into @ctx */
	apply_wqattrs_cleanup(ctx);

	return 0;
}
/**
 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
 * @wq: the target workqueue
 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
 *
 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, this function maps
 * a separate pwq to each CPU pod with possibles CPUs in @attrs->cpumask so that
 * work items are affine to the pod it was issued on.  Older pwqs are released as
 * in-flight work items finish.  Note that a work item which repeatedly requeues
 * itself back-to-back will stay on its current pwq.
 *
 * Performs GFP_KERNEL allocations.
 *
 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
 *
 * Return: 0 on success and -errno on failure.
 */
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs)
{
	int ret;

	lockdep_assert_cpus_held();

	/* serialize against other attrs changes and pool management */
	mutex_lock(&wq_pool_mutex);
	ret = apply_workqueue_attrs_locked(wq, attrs);
	mutex_unlock(&wq_pool_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
/**
 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
 * @wq: the target workqueue
 * @cpu: the CPU to update pool association for
 * @hotplug_cpu: the CPU coming up or going down
 * @online: whether @cpu is coming up or going down
 *
 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update pod affinity of
 * @wq accordingly.
 *
 *
 * If pod affinity can't be adjusted due to memory allocation failure, it falls
 * back to @wq->dfl_pwq which may not be optimal but is always correct.
 *
 * Note that when the last allowed CPU of a pod goes offline for a workqueue
 * with a cpumask spanning multiple pods, the workers which were already
 * executing the work items for the workqueue will lose their CPU affinity and
 * may execute on any CPU.  This is similar to how per-cpu workqueues behave on
 * CPU_DOWN.  If a workqueue user wants strict affinity, it's the user's
 * responsibility to flush the work item from CPU_DOWN_PREPARE.
 */
static void wq_update_pod(struct workqueue_struct *wq, int cpu,
			  int hotplug_cpu, bool online)
{
	int off_cpu = online ? -1 : hotplug_cpu;
	struct pool_workqueue *old_pwq = NULL, *pwq;
	struct workqueue_attrs *target_attrs;

	lockdep_assert_held(&wq_pool_mutex);

	/* per-cpu and ordered wqs don't track pod affinity - nothing to do */
	if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
		return;

	/*
	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
	 * Let's use a preallocated one.  The following buf is protected by
	 * CPU hotplug exclusion.
	 */
	target_attrs = wq_update_pod_attrs_buf;

	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
	wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);

	/* nothing to do if the target cpumask matches the current pwq */
	wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
	pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
					lockdep_is_held(&wq_pool_mutex));
	if (wqattrs_equal(target_attrs, pwq->pool->attrs))
		return;

	/* create a new pwq */
	pwq = alloc_unbound_pwq(wq, target_attrs);
	if (!pwq) {
		pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
			wq->name);
		goto use_dfl_pwq;
	}

	/* Install the new pwq. */
	mutex_lock(&wq->mutex);
	old_pwq = install_unbound_pwq(wq, cpu, pwq);
	goto out_unlock;

use_dfl_pwq:
	/* always-correct fallback: point @cpu at the default pwq */
	mutex_lock(&wq->mutex);
	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
	get_pwq(wq->dfl_pwq);
	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
	old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
out_unlock:
	mutex_unlock(&wq->mutex);
	/* drop the reference the table held on the replaced pwq */
	put_pwq_unlocked(old_pwq);
}
/*
 * Allocate and install the pool_workqueues for @wq.
 *
 * Per-cpu workqueues get one pwq per possible CPU, linked directly to the
 * matching per-cpu worker pool. Unbound workqueues go through
 * apply_workqueue_attrs() unless an OEM takes over via the Android vendor
 * hook. Returns 0 on success, -errno on failure.
 */
static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
	bool highpri = wq->flags & WQ_HIGHPRI;
	int cpu, ret;
	bool skip = false;

	wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
	if (!wq->cpu_pwq)
		goto enomem;

	if (!(wq->flags & WQ_UNBOUND)) {
		for_each_possible_cpu(cpu) {
			struct pool_workqueue **pwq_p =
				per_cpu_ptr(wq->cpu_pwq, cpu);
			struct worker_pool *pool =
				&(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);

			*pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
						       pool->node);
			if (!*pwq_p)
				goto enomem;

			init_pwq(*pwq_p, wq, pool);

			mutex_lock(&wq->mutex);
			link_pwq(*pwq_p);
			mutex_unlock(&wq->mutex);
		}
		return 0;
	}

	/*
	 * Android vendor hook: an OEM may take over unbound pwq setup.
	 * NOTE(review): when @skip is set, the hook must have stored a valid
	 * value in @ret — verify against the vendor implementation, as @ret
	 * is otherwise uninitialized at the oem_skip label.
	 */
	trace_android_rvh_alloc_and_link_pwqs(wq, &ret, &skip);
	if (skip)
		goto oem_skip;

	cpus_read_lock();
	if (wq->flags & __WQ_ORDERED) {
		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
		/* there should only be single pwq for ordering guarantee */
		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
		     "ordering guarantee broken for workqueue %s\n", wq->name);
	} else {
		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
	}
	cpus_read_unlock();

oem_skip:
	/* for unbound pwq, flush the pwq_release_worker ensures that the
	 * pwq_release_workfn() completes before calling kfree(wq).
	 */
	if (ret)
		kthread_flush_worker(pwq_release_worker);

	return ret;

enomem:
	/* free any per-cpu pwqs already allocated, then the percpu array */
	if (wq->cpu_pwq) {
		for_each_possible_cpu(cpu) {
			struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);

			if (pwq)
				kmem_cache_free(pwq_cache, pwq);
		}
		free_percpu(wq->cpu_pwq);
		wq->cpu_pwq = NULL;
	}
	return -ENOMEM;
}
static int wq_clamp_max_active(int max_active, unsigned int flags,
const char *name)
{
if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
max_active, name, 1, WQ_MAX_ACTIVE);
return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
}
/*
 * Workqueues which may be used during memory reclaim should have a rescuer
 * to guarantee forward progress.
 *
 * Allocates the rescuer worker and its kthread for @wq. Only WQ_MEM_RECLAIM
 * workqueues get a rescuer; returns 0 (a no-op) otherwise. Returns -errno on
 * allocation or kthread-creation failure.
 */
static int init_rescuer(struct workqueue_struct *wq)
{
	struct worker *rescuer;
	int ret;

	if (!(wq->flags & WQ_MEM_RECLAIM))
		return 0;

	rescuer = alloc_worker(NUMA_NO_NODE);
	if (!rescuer) {
		pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
		       wq->name);
		return -ENOMEM;
	}

	rescuer->rescue_wq = wq;
	rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
	if (IS_ERR(rescuer->task)) {
		ret = PTR_ERR(rescuer->task);
		pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
		       wq->name, ERR_PTR(ret));
		kfree(rescuer);
		return ret;
	}

	/* publish the rescuer, allow it on any possible CPU, then start it */
	wq->rescuer = rescuer;
	kthread_bind_mask(rescuer->task, cpu_possible_mask);
	wake_up_process(rescuer->task);

	return 0;
}
/*
 * alloc_workqueue - allocate a new workqueue
 * @fmt: printf format for the workqueue name
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for the default
 *
 * Remaining args are formatted into the name. Returns the new workqueue on
 * success, %NULL on failure.
 */
__printf(1, 4)
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...)
{
	va_list args;
	struct workqueue_struct *wq;
	struct pool_workqueue *pwq;

	/*
	 * Unbound && max_active == 1 used to imply ordered, which is no longer
	 * the case on many machines due to per-pod pools. While
	 * alloc_ordered_workqueue() is the right way to create an ordered
	 * workqueue, keep the previous behavior to avoid subtle breakages.
	 */
	if ((flags & WQ_UNBOUND) && max_active == 1)
		flags |= __WQ_ORDERED;

	/* see the comment above the definition of WQ_POWER_EFFICIENT */
	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
		flags |= WQ_UNBOUND;

	/* allocate wq and format name */
	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	if (flags & WQ_UNBOUND) {
		wq->unbound_attrs = alloc_workqueue_attrs();
		if (!wq->unbound_attrs)
			goto err_free_wq;
	}

	va_start(args, max_active);
	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
	va_end(args);

	/* Android vendor hook: may override @flags and @max_active */
	trace_android_rvh_alloc_workqueue(wq, &flags, &max_active);

	max_active = max_active ?: WQ_DFL_ACTIVE;
	max_active = wq_clamp_max_active(max_active, flags, wq->name);

	/* init wq */
	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->mutex);
	atomic_set(&wq->nr_pwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->pwqs);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	INIT_LIST_HEAD(&wq->maydays);

	wq_init_lockdep(wq);
	INIT_LIST_HEAD(&wq->list);

	if (alloc_and_link_pwqs(wq) < 0)
		goto err_unreg_lockdep;

	/* rescuers are only created once the workqueue subsystem is online */
	if (wq_online && init_rescuer(wq) < 0)
		goto err_destroy;

	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
		goto err_destroy;

	/*
	 * wq_pool_mutex protects global freeze state and workqueues list.
	 * Grab it, adjust max_active and add the new @wq to workqueues
	 * list.
	 */
	mutex_lock(&wq_pool_mutex);

	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq)
		pwq_adjust_max_active(pwq);
	mutex_unlock(&wq->mutex);

	list_add_tail_rcu(&wq->list, &workqueues);

	mutex_unlock(&wq_pool_mutex);

	return wq;

err_unreg_lockdep:
	wq_unregister_lockdep(wq);
	wq_free_lockdep(wq);
err_free_wq:
	/* unbound_attrs may be NULL here (per-cpu wq) — presumably tolerated
	 * by free_workqueue_attrs(); verify against its implementation. */
	free_workqueue_attrs(wq->unbound_attrs);
	kfree(wq);
	return NULL;
err_destroy:
	/* pwqs are already linked; tear everything down the slow way */
	destroy_workqueue(wq);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_workqueue);
/*
 * Return %true if @pwq still has in-flight, active, or inactive work, or
 * holds extra references (beyond the base ref, except for the default pwq).
 * Used by destroy_workqueue() as a sanity check before teardown.
 */
static bool pwq_busy(struct pool_workqueue *pwq)
{
	int color;

	for (color = 0; color < WORK_NR_COLORS; color++) {
		if (pwq->nr_in_flight[color])
			return true;
	}

	if (pwq->refcnt > 1 && pwq != pwq->wq->dfl_pwq)
		return true;
	if (pwq->nr_active)
		return true;
	if (!list_empty(&pwq->inactive_works))
		return true;

	return false;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int cpu;

	/*
	 * Remove it from sysfs first so that sanity check failure doesn't
	 * lead to sysfs name conflicts.
	 */
	workqueue_sysfs_unregister(wq);

	/* mark the workqueue destruction is in progress */
	mutex_lock(&wq->mutex);
	wq->flags |= __WQ_DESTROYING;
	mutex_unlock(&wq->mutex);

	/* drain it before proceeding with destruction */
	drain_workqueue(wq);

	/* kill rescuer, if sanity checks fail, leave it w/o rescuer */
	if (wq->rescuer) {
		struct worker *rescuer = wq->rescuer;

		/* this prevents new queueing */
		raw_spin_lock_irq(&wq_mayday_lock);
		wq->rescuer = NULL;
		raw_spin_unlock_irq(&wq_mayday_lock);

		/* rescuer will empty maydays list before exiting */
		kthread_stop(rescuer->task);
		kfree(rescuer);
	}

	/*
	 * Sanity checks - grab all the locks so that we wait for all
	 * in-flight operations which may do put_pwq().
	 */
	mutex_lock(&wq_pool_mutex);
	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq) {
		raw_spin_lock_irq(&pwq->pool->lock);
		if (WARN_ON(pwq_busy(pwq))) {
			/* intentionally leak @wq rather than free it while busy */
			pr_warn("%s: %s has the following busy pwq\n",
				__func__, wq->name);
			show_pwq(pwq);
			raw_spin_unlock_irq(&pwq->pool->lock);
			mutex_unlock(&wq->mutex);
			mutex_unlock(&wq_pool_mutex);
			show_one_workqueue(wq);
			return;
		}
		raw_spin_unlock_irq(&pwq->pool->lock);
	}
	mutex_unlock(&wq->mutex);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	list_del_rcu(&wq->list);
	mutex_unlock(&wq_pool_mutex);

	/*
	 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
	 * to put the base refs. @wq will be auto-destroyed from the last
	 * pwq_put. RCU read lock prevents @wq from going away from under us.
	 */
	rcu_read_lock();

	for_each_possible_cpu(cpu) {
		pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
		RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
		put_pwq_unlocked(pwq);
	}

	put_pwq_unlocked(wq->dfl_pwq);
	wq->dfl_pwq = NULL;

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	struct pool_workqueue *pwq;

	/* disallow meddling with max_active for ordered workqueues */
	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
		return;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	mutex_lock(&wq->mutex);

	/* @wq is no longer treated as ordered (explicit ones were rejected above) */
	wq->flags &= ~__WQ_ORDERED;
	wq->saved_max_active = max_active;

	/* propagate the new limit to every pwq */
	for_each_pwq(pwq, wq)
		pwq_adjust_max_active(pwq);

	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
/**
 * current_work - retrieve %current task's work struct
 *
 * Determine if %current task is a workqueue worker and what it's working on.
 * Useful to find out the context that the %current task is running in.
 *
 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
 */
struct work_struct *current_work(void)
{
	struct worker *worker = current_wq_worker();

	if (!worker)
		return NULL;

	return worker->current_work;
}
EXPORT_SYMBOL(current_work);
/**
* current_is_workqueue_rescuer - is %current workqueue rescuer?
*
* Determine whether %current is a workqueue rescuer. Can be used from
* work functions to determine whether it's being run off the rescuer task.
*
* Return: %true if %current is a workqueue rescuer. %false otherwise.
*/
bool current_is_workqueue_rescuer(void)
{
struct worker *worker = current_wq_worker();
return worker && worker->rescue_wq;
}
/**
 * workqueue_congested - test whether a workqueue is congested
 * @cpu: CPU in question
 * @wq: target workqueue
 *
 * Test whether @wq's cpu workqueue for @cpu is congested. There is
 * no synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 *
 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
 *
 * With the exception of ordered workqueues, all workqueues have per-cpu
 * pool_workqueues, each with its own congested state. A workqueue being
 * congested on one CPU doesn't mean that the workqueue is contested on any
 * other CPUs.
 *
 * Return:
 * %true if congested, %false otherwise.
 */
bool workqueue_congested(int cpu, struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	bool ret;

	rcu_read_lock();
	/* disable preemption so WORK_CPU_UNBOUND resolves to a stable CPU */
	preempt_disable();

	if (cpu == WORK_CPU_UNBOUND)
		cpu = smp_processor_id();

	pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
	/* a non-empty inactive_works list is reported as congestion */
	ret = !list_empty(&pwq->inactive_works);

	preempt_enable();
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(workqueue_congested);
/**
 * work_busy - test whether a work is currently pending or running
 * @work: the work to be tested
 *
 * Test whether @work is currently pending or running. There is no
 * synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 *
 * Return:
 * OR'd bitmask of WORK_BUSY_* bits.
 */
unsigned int work_busy(struct work_struct *work)
{
	struct worker_pool *pool;
	unsigned long flags;
	unsigned int ret = 0;

	if (work_pending(work))
		ret |= WORK_BUSY_PENDING;

	/* RCU read section keeps the looked-up pool accessible */
	rcu_read_lock();
	pool = get_work_pool(work);
	if (pool) {
		/* pool->lock stabilizes the busy-hash lookup */
		raw_spin_lock_irqsave(&pool->lock, flags);
		if (find_worker_executing_work(pool, work))
			ret |= WORK_BUSY_RUNNING;
		raw_spin_unlock_irqrestore(&pool->lock, flags);
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
/**
 * set_worker_desc - set description for the current work item
 * @fmt: printf-style format string
 * @...: arguments for the format string
 *
 * May be called by a running work function to record what the work item is
 * about; the description is printed alongside worker dumps to aid debugging.
 * At most WORKER_DESC_LEN bytes including the trailing '\0' are kept. A no-op
 * when the caller is not a workqueue worker.
 */
void set_worker_desc(const char *fmt, ...)
{
	struct worker *worker = current_wq_worker();
	va_list args;

	if (!worker)
		return;

	va_start(args, fmt);
	vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(set_worker_desc);
/**
 * print_worker_info - print out worker information and description
 * @log_lvl: the log level to use when printing
 * @task: target task
 *
 * If @task is a worker and currently executing a work item, print out the
 * name of the workqueue being serviced and worker description set with
 * set_worker_desc() by the currently executing work item.
 *
 * This function can be safely called on any task as long as the
 * task_struct itself is accessible. While safe, this function isn't
 * synchronized and may print out mixups or garbages of limited length.
 */
void print_worker_info(const char *log_lvl, struct task_struct *task)
{
	work_func_t *fn = NULL;
	char name[WQ_NAME_LEN] = { };
	char desc[WORKER_DESC_LEN] = { };
	struct pool_workqueue *pwq = NULL;
	struct workqueue_struct *wq = NULL;
	struct worker *worker;

	if (!(task->flags & PF_WQ_WORKER))
		return;

	/*
	 * This function is called without any synchronization and @task
	 * could be in any state. Be careful with dereferences.
	 */
	worker = kthread_probe_data(task);

	/*
	 * Carefully copy the associated workqueue's workfn, name and desc.
	 * Keep the original last '\0' in case the original is garbage.
	 */
	copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
	copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
	copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
	copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
	copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);

	/* only print when at least one field was copied out successfully */
	if (fn || name[0] || desc[0]) {
		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
		/* omit the description when it merely repeats the wq name */
		if (strcmp(name, desc))
			pr_cont(" (%s)", desc);
		pr_cont("\n");
	}
}
/* print @pool's cpumask, node, flags and nice level as a pr_cont() fragment */
static void pr_cont_pool_info(struct worker_pool *pool)
{
	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
	if (pool->node != NUMA_NO_NODE)
		pr_cont(" node=%d", pool->node);
	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
}
/*
 * State for coalescing consecutive identical work functions while dumping
 * work lists; see pr_cont_work_flush().
 */
struct pr_cont_work_struct {
	bool comma;		/* whether a leading comma is needed */
	work_func_t func;	/* function of the pending coalesced run */
	long ctr;		/* length of the pending run; 0 == none */
};
/*
 * Flush/accumulate one work function into @pcwsp. Consecutive identical
 * functions are counted and printed as "N*func" rather than N separate
 * entries. Passing (work_func_t)-1L as @func flushes any pending run
 * without starting a new one.
 */
static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
{
	if (!pcwsp->ctr)
		goto out_record;

	/* same function as the pending run — just extend it */
	if (func == pcwsp->func) {
		pcwsp->ctr++;
		return;
	}

	/* different function — print the pending run, singly or as N*func */
	if (pcwsp->ctr == 1)
		pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
	else
		pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
	pcwsp->ctr = 0;
out_record:
	/* -1L is the sentinel meaning "flush only, record nothing" */
	if ((long)func == -1L)
		return;
	pcwsp->comma = comma;
	pcwsp->func = func;
	pcwsp->ctr = 1;
}
/*
 * Print one work item as a pr_cont() fragment. Barrier works are printed
 * immediately as "BAR(pid)"; ordinary works are funneled through
 * pr_cont_work_flush() so consecutive duplicates can be coalesced.
 */
static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
{
	if (work->func == wq_barrier_func) {
		struct wq_barrier *barr;

		barr = container_of(work, struct wq_barrier, work);

		/* flush any pending coalesced run before printing the barrier */
		pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
		pr_cont("%s BAR(%d)", comma ? "," : "",
			task_pid_nr(barr->task));
	} else {
		if (!comma)
			pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
		pr_cont_work_flush(comma, work->func, pcwsp);
	}
}
/*
 * Dump @pwq's state: identity line, then in-flight, pending and inactive
 * work items. Called with @pwq->pool->lock held (see destroy_workqueue()
 * and show_one_workqueue()).
 */
static void show_pwq(struct pool_workqueue *pwq)
{
	struct pr_cont_work_struct pcws = { .ctr = 0, };
	struct worker_pool *pool = pwq->pool;
	struct work_struct *work;
	struct worker *worker;
	bool has_in_flight = false, has_pending = false;
	int bkt;

	pr_info("  pwq %d:", pool->id);
	pr_cont_pool_info(pool);

	pr_cont(" active=%d/%d refcnt=%d%s\n",
		pwq->nr_active, pwq->max_active, pwq->refcnt,
		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");

	/* first pass: is any busy worker executing on behalf of @pwq? */
	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
		if (worker->current_pwq == pwq) {
			has_in_flight = true;
			break;
		}
	}
	if (has_in_flight) {
		bool comma = false;

		pr_info("    in-flight:");
		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
			if (worker->current_pwq != pwq)
				continue;

			pr_cont("%s %d%s:%ps", comma ? "," : "",
				task_pid_nr(worker->task),
				worker->rescue_wq ? "(RESCUER)" : "",
				worker->current_func);
			list_for_each_entry(work, &worker->scheduled, entry)
				pr_cont_work(false, work, &pcws);
			pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
			comma = true;
		}
		pr_cont("\n");
	}

	/* first pass: does the pool's worklist hold anything for @pwq? */
	list_for_each_entry(work, &pool->worklist, entry) {
		if (get_work_pwq(work) == pwq) {
			has_pending = true;
			break;
		}
	}
	if (has_pending) {
		bool comma = false;

		pr_info("    pending:");
		list_for_each_entry(work, &pool->worklist, entry) {
			if (get_work_pwq(work) != pwq)
				continue;

			pr_cont_work(comma, work, &pcws);
			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
		}
		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
		pr_cont("\n");
	}

	if (!list_empty(&pwq->inactive_works)) {
		bool comma = false;

		pr_info("    inactive:");
		list_for_each_entry(work, &pwq->inactive_works, entry) {
			pr_cont_work(comma, work, &pcws);
			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
		}
		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
		pr_cont("\n");
	}
}
/**
 * show_one_workqueue - dump state of specified workqueue
 * @wq: workqueue whose state will be printed
 */
void show_one_workqueue(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	bool idle = true;
	unsigned long flags;

	/* lockless pre-scan: skip entirely-idle workqueues */
	for_each_pwq(pwq, wq) {
		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
			idle = false;
			break;
		}
	}
	if (idle) /* Nothing to print for idle workqueue */
		return;

	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);

	for_each_pwq(pwq, wq) {
		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
			/*
			 * Defer printing to avoid deadlocks in console
			 * drivers that queue work while holding locks
			 * also taken in their write paths.
			 */
			printk_deferred_enter();
			show_pwq(pwq);
			printk_deferred_exit();
		}
		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
		/*
		 * We could be printing a lot from atomic context, e.g.
		 * sysrq-t -> show_all_workqueues(). Avoid triggering
		 * hard lockup.
		 */
		touch_nmi_watchdog();
	}
}
/**
 * show_one_worker_pool - dump state of specified worker pool
 * @pool: worker pool whose state will be printed
 */
static void show_one_worker_pool(struct worker_pool *pool)
{
	struct worker *worker;
	bool first = true;
	unsigned long flags;
	unsigned long hung = 0;

	raw_spin_lock_irqsave(&pool->lock, flags);
	/* all workers idle == nothing interesting to report */
	if (pool->nr_workers == pool->nr_idle)
		goto next_pool;

	/* How long the first pending work is waiting for a worker. */
	if (!list_empty(&pool->worklist))
		hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;

	/*
	 * Defer printing to avoid deadlocks in console drivers that
	 * queue work while holding locks also taken in their write
	 * paths.
	 */
	printk_deferred_enter();
	pr_info("pool %d:", pool->id);
	pr_cont_pool_info(pool);
	pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
	if (pool->manager)
		pr_cont(" manager: %d",
			task_pid_nr(pool->manager->task));
	list_for_each_entry(worker, &pool->idle_list, entry) {
		pr_cont(" %s%d", first ? "idle: " : "",
			task_pid_nr(worker->task));
		first = false;
	}
	pr_cont("\n");
	printk_deferred_exit();
next_pool:
	raw_spin_unlock_irqrestore(&pool->lock, flags);
	/*
	 * We could be printing a lot from atomic context, e.g.
	 * sysrq-t -> show_all_workqueues(). Avoid triggering
	 * hard lockup.
	 */
	touch_nmi_watchdog();
}
/**
 * show_all_workqueues - dump workqueue state
 *
 * Called from a sysrq handler and prints out all busy workqueues and pools.
 */
void show_all_workqueues(void)
{
	struct worker_pool *pool;
	struct workqueue_struct *wq;
	int pool_id;

	rcu_read_lock();

	pr_info("Showing busy workqueues and worker pools:\n");

	/* every registered workqueue first, then every worker pool */
	list_for_each_entry_rcu(wq, &workqueues, list)
		show_one_workqueue(wq);

	for_each_pool(pool, pool_id)
		show_one_worker_pool(pool);

	rcu_read_unlock();
}
/**
 * show_freezable_workqueues - dump freezable workqueue state
 *
 * Called from try_to_freeze_tasks() and prints out all freezable workqueues
 * still busy.
 */
void show_freezable_workqueues(void)
{
	struct workqueue_struct *wq;

	rcu_read_lock();

	pr_info("Showing freezable workqueues that are still busy:\n");

	list_for_each_entry_rcu(wq, &workqueues, list) {
		if (wq->flags & WQ_FREEZABLE)
			show_one_workqueue(wq);
	}

	rcu_read_unlock();
}
/* used to show worker information through /proc/PID/{comm,stat,status} */
void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
{
	int off;

	/* always show the actual comm */
	off = strscpy(buf, task->comm, size);
	if (off < 0)
		return;

	/* stabilize PF_WQ_WORKER and worker pool association */
	mutex_lock(&wq_pool_attach_mutex);

	if (task->flags & PF_WQ_WORKER) {
		struct worker *worker = kthread_data(task);
		struct worker_pool *pool = worker->pool;

		if (pool) {
			raw_spin_lock_irq(&pool->lock);
			/*
			 * ->desc tracks information (wq name or
			 * set_worker_desc()) for the latest execution. If
			 * current, prepend '+', otherwise '-'.
			 */
			if (worker->desc[0] != '\0') {
				if (worker->current_work)
					scnprintf(buf + off, size - off, "+%s",
						  worker->desc);
				else
					scnprintf(buf + off, size - off, "-%s",
						  worker->desc);
			}
			raw_spin_unlock_irq(&pool->lock);
		}
	}

	mutex_unlock(&wq_pool_attach_mutex);
}
EXPORT_SYMBOL_GPL(wq_worker_comm);
#ifdef CONFIG_SMP
/*
* CPU hotplug.
*
* There are two challenges in supporting CPU hotplug. Firstly, there
* are a lot of assumptions on strong associations among work, pwq and
* pool which make migrating pending and scheduled works very
* difficult to implement without impacting hot paths. Secondly,
* worker pools serve mix of short, long and very long running works making
* blocked draining impractical.
*
* This is solved by allowing the pools to be disassociated from the CPU
* running as an unbound one and allowing it to be reattached later if the
* cpu comes back online.
*/
/*
 * Detach all per-cpu worker pools of @cpu: mark their workers UNBOUND, set
 * POOL_DISASSOCIATED and zap concurrency accounting. Called from
 * workqueue_offline_cpu() on the CPU going down.
 */
static void unbind_workers(int cpu)
{
	struct worker_pool *pool;
	struct worker *worker;

	for_each_cpu_worker_pool(pool, cpu) {
		mutex_lock(&wq_pool_attach_mutex);
		raw_spin_lock_irq(&pool->lock);

		/*
		 * We've blocked all attach/detach operations. Make all workers
		 * unbound and set DISASSOCIATED. Before this, all workers
		 * must be on the cpu. After this, they may become diasporas.
		 * And the preemption disabled section in their sched callbacks
		 * are guaranteed to see WORKER_UNBOUND since the code here
		 * is on the same cpu.
		 */
		for_each_pool_worker(worker, pool)
			worker->flags |= WORKER_UNBOUND;

		pool->flags |= POOL_DISASSOCIATED;

		/*
		 * The handling of nr_running in sched callbacks are disabled
		 * now. Zap nr_running. After this, nr_running stays zero and
		 * need_more_worker() and keep_working() are always true as
		 * long as the worklist is not empty. This pool now behaves as
		 * an unbound (in terms of concurrency management) pool which
		 * are served by workers tied to the pool.
		 */
		pool->nr_running = 0;

		/*
		 * With concurrency management just turned off, a busy
		 * worker blocking could lead to lengthy stalls. Kick off
		 * unbound chain execution of currently pending work items.
		 */
		kick_pool(pool);

		raw_spin_unlock_irq(&pool->lock);

		/* actually widen each worker's cpumask, outside pool->lock */
		for_each_pool_worker(worker, pool)
			unbind_worker(worker);

		mutex_unlock(&wq_pool_attach_mutex);
	}
}
/**
 * rebind_workers - rebind all workers of a pool to the associated CPU
 * @pool: pool of interest
 *
 * @pool->cpu is coming online. Rebind all workers to the CPU.
 */
static void rebind_workers(struct worker_pool *pool)
{
	struct worker *worker;

	lockdep_assert_held(&wq_pool_attach_mutex);

	/*
	 * Restore CPU affinity of all workers. As all idle workers should
	 * be on the run-queue of the associated CPU before any local
	 * wake-ups for concurrency management happen, restore CPU affinity
	 * of all workers first and then clear UNBOUND. As we're called
	 * from CPU_ONLINE, the following shouldn't fail.
	 */
	for_each_pool_worker(worker, pool) {
		kthread_set_per_cpu(worker->task, pool->cpu);
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
						  pool_allowed_cpus(pool)) < 0);
	}

	raw_spin_lock_irq(&pool->lock);

	pool->flags &= ~POOL_DISASSOCIATED;

	for_each_pool_worker(worker, pool) {
		unsigned int worker_flags = worker->flags;

		/*
		 * We want to clear UNBOUND but can't directly call
		 * worker_clr_flags() or adjust nr_running. Atomically
		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
		 * @worker will clear REBOUND using worker_clr_flags() when
		 * it initiates the next execution cycle thus restoring
		 * concurrency management. Note that when or whether
		 * @worker clears REBOUND doesn't affect correctness.
		 *
		 * WRITE_ONCE() is necessary because @worker->flags may be
		 * tested without holding any lock in
		 * wq_worker_running(). Without it, NOT_RUNNING test may
		 * fail incorrectly leading to premature concurrency
		 * management operations.
		 */
		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
		worker_flags |= WORKER_REBOUND;
		worker_flags &= ~WORKER_UNBOUND;
		WRITE_ONCE(worker->flags, worker_flags);
	}

	raw_spin_unlock_irq(&pool->lock);
}
/**
 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
 * @pool: unbound pool of interest
 * @cpu: the CPU which is coming up
 *
 * An unbound pool may end up with a cpumask which doesn't have any online
 * CPUs. When a worker of such pool get scheduled, the scheduler resets
 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
 * online CPU before, cpus_allowed of all its workers should be restored.
 */
static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
{
	/* static scratch mask; serialized by wq_pool_attach_mutex below */
	static cpumask_t cpumask;
	struct worker *worker;

	lockdep_assert_held(&wq_pool_attach_mutex);

	/* is @cpu allowed for @pool? */
	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
		return;

	/* restrict the pool's mask to the CPUs that are actually online */
	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);

	/* as we're called from CPU_ONLINE, the following shouldn't fail */
	for_each_pool_worker(worker, pool)
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
}
/*
 * CPU hotplug prepare callback: make sure each per-cpu worker pool of @cpu
 * has at least one worker before the CPU comes up. Returns 0 on success,
 * -ENOMEM when a worker cannot be created.
 */
int workqueue_prepare_cpu(unsigned int cpu)
{
	struct worker_pool *pool;

	for_each_cpu_worker_pool(pool, cpu) {
		if (!pool->nr_workers && !create_worker(pool))
			return -ENOMEM;
	}

	return 0;
}
/* CPU hotplug callback: @cpu has come online */
int workqueue_online_cpu(unsigned int cpu)
{
	struct worker_pool *pool;
	struct workqueue_struct *wq;
	int pi;

	mutex_lock(&wq_pool_mutex);

	for_each_pool(pool, pi) {
		mutex_lock(&wq_pool_attach_mutex);

		/* rebind @cpu's own pools; unbound pools may regain @cpu */
		if (pool->cpu == cpu)
			rebind_workers(pool);
		else if (pool->cpu < 0)
			restore_unbound_workers_cpumask(pool, cpu);

		mutex_unlock(&wq_pool_attach_mutex);
	}

	/* update pod affinity of unbound workqueues */
	list_for_each_entry(wq, &workqueues, list) {
		struct workqueue_attrs *attrs = wq->unbound_attrs;

		if (attrs) {
			const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
			int tcpu;

			/* refresh every CPU in the pod @cpu belongs to */
			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
				wq_update_pod(wq, tcpu, cpu, true);
		}
	}

	mutex_unlock(&wq_pool_mutex);
	return 0;
}
/* CPU hotplug callback: @cpu is going down */
int workqueue_offline_cpu(unsigned int cpu)
{
	struct workqueue_struct *wq;

	/* unbinding per-cpu workers should happen on the local CPU */
	if (WARN_ON(cpu != smp_processor_id()))
		return -1;

	unbind_workers(cpu);

	/* update pod affinity of unbound workqueues */
	mutex_lock(&wq_pool_mutex);
	list_for_each_entry(wq, &workqueues, list) {
		struct workqueue_attrs *attrs = wq->unbound_attrs;

		if (attrs) {
			const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
			int tcpu;

			/* refresh every CPU in the pod @cpu belongs to */
			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
				wq_update_pod(wq, tcpu, cpu, false);
		}
	}
	mutex_unlock(&wq_pool_mutex);

	return 0;
}
/*
 * Argument/result bundle for work_on_cpu_key(); lives on the caller's
 * stack (see INIT_WORK_ONSTACK_KEY() usage there).
 */
struct work_for_cpu {
	struct work_struct work;	/* work item run on the target CPU */
	long (*fn)(void *);		/* function to invoke */
	void *arg;			/* argument passed to @fn */
	long ret;			/* return value of @fn */
};
/* trampoline executed on the target CPU: run @fn and stash its result */
static void work_for_cpu_fn(struct work_struct *work)
{
	struct work_for_cpu *wfc;

	wfc = container_of(work, struct work_for_cpu, work);
	wfc->ret = wfc->fn(wfc->arg);
}
/**
 * work_on_cpu_key - run a function in thread context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 * @key: The lock class key for lock debugging purposes
 *
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 *
 * Return: The value @fn returns.
 */
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key)
{
	struct work_for_cpu wfc = { .fn = fn, .arg = arg };

	/* queue on @cpu and wait synchronously for the result */
	INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
	schedule_work_on(cpu, &wfc.work);
	flush_work(&wfc.work);
	destroy_work_on_stack(&wfc.work);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu_key);
/**
 * work_on_cpu_safe_key - run a function in thread context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function argument
 * @key: The lock class key for lock debugging purposes
 *
 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
 * any locks which would prevent @fn from completing.
 *
 * Return: The value @fn returns, or -ENODEV when @cpu is offline.
 */
long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key)
{
	long ret = -ENODEV;

	/* hold off hotplug so @cpu can't go away under us */
	cpus_read_lock();
	if (cpu_online(cpu))
		ret = work_on_cpu_key(cpu, fn, arg, key);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues. After this function returns, all freezable
 * workqueues will queue new works to their inactive_works list instead of
 * pool->worklist.
 *
 * CONTEXT:
 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
 */
void freeze_workqueues_begin(void)
{
	struct workqueue_struct *wq;
	struct pool_workqueue *pwq;

	mutex_lock(&wq_pool_mutex);

	WARN_ON_ONCE(workqueue_freezing);
	workqueue_freezing = true;

	/* flip the global flag, then refresh every pwq's max_active */
	list_for_each_entry(wq, &workqueues, list) {
		mutex_lock(&wq->mutex);
		for_each_pwq(pwq, wq)
			pwq_adjust_max_active(pwq);
		mutex_unlock(&wq->mutex);
	}

	mutex_unlock(&wq_pool_mutex);
}
/**
 * freeze_workqueues_busy - are freezable workqueues still busy?
 *
 * Check whether freezing is complete. This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases wq_pool_mutex.
 *
 * Return:
 * %true if some freezable workqueues are still busy. %false if freezing
 * is complete.
 */
bool freeze_workqueues_busy(void)
{
	bool busy = false;
	struct workqueue_struct *wq;
	struct pool_workqueue *pwq;

	mutex_lock(&wq_pool_mutex);

	WARN_ON_ONCE(!workqueue_freezing);

	list_for_each_entry(wq, &workqueues, list) {
		if (!(wq->flags & WQ_FREEZABLE))
			continue;
		/*
		 * nr_active is monotonically decreasing. It's safe
		 * to peek without lock.
		 */
		rcu_read_lock();
		for_each_pwq(pwq, wq) {
			WARN_ON_ONCE(pwq->nr_active < 0);
			if (pwq->nr_active) {
				busy = true;
				rcu_read_unlock();
				goto out_unlock;
			}
		}
		rcu_read_unlock();
	}
out_unlock:
	mutex_unlock(&wq_pool_mutex);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues. Normal queueing is restored and all collected
 * frozen works are transferred to their respective pool worklists.
 *
 * CONTEXT:
 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
 */
void thaw_workqueues(void)
{
	struct workqueue_struct *wq;
	struct pool_workqueue *pwq;

	mutex_lock(&wq_pool_mutex);

	/* no-op unless a freeze is actually in progress */
	if (!workqueue_freezing)
		goto out_unlock;

	workqueue_freezing = false;

	/* restore max_active and repopulate worklist */
	list_for_each_entry(wq, &workqueues, list) {
		mutex_lock(&wq->mutex);
		for_each_pwq(pwq, wq)
			pwq_adjust_max_active(pwq);
		mutex_unlock(&wq->mutex);
	}

out_unlock:
	mutex_unlock(&wq_pool_mutex);
}
#endif /* CONFIG_FREEZER */
/*
 * Apply @unbound_cpumask to every unbound, non-ordered workqueue using the
 * two-phase prepare/commit attrs machinery: prepare all contexts first, and
 * commit only if every preparation succeeded. Caller holds wq_pool_mutex.
 * Returns 0 on success, -errno on preparation failure.
 */
static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
{
	LIST_HEAD(ctxs);
	int ret = 0;
	struct workqueue_struct *wq;
	struct apply_wqattrs_ctx *ctx, *n;

	lockdep_assert_held(&wq_pool_mutex);

	/* phase 1: prepare a ctx per affected workqueue */
	list_for_each_entry(wq, &workqueues, list) {
		if (!(wq->flags & WQ_UNBOUND))
			continue;

		/* creating multiple pwqs breaks ordering guarantee */
		if (wq->flags & __WQ_ORDERED)
			continue;

		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
		if (IS_ERR(ctx)) {
			ret = PTR_ERR(ctx);
			break;
		}

		list_add_tail(&ctx->list, &ctxs);
	}

	/* phase 2: commit all-or-nothing, then clean up each ctx */
	list_for_each_entry_safe(ctx, n, &ctxs, list) {
		if (!ret)
			apply_wqattrs_commit(ctx);
		apply_wqattrs_cleanup(ctx);
	}

	if (!ret) {
		mutex_lock(&wq_pool_attach_mutex);
		cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
		mutex_unlock(&wq_pool_attach_mutex);
	}
	return ret;
}
/**
 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
 * @cpumask: the cpumask to set
 *
 * The low-level workqueues cpumask is a global cpumask that limits
 * the affinity of all unbound workqueues. This function check the @cpumask
 * and apply it to all unbound workqueues and updates all pwqs of them.
 *
 * Note: @cpumask is modified in place (intersected with cpu_possible_mask).
 *
 * Return: 0 - Success
 * -EINVAL - Invalid @cpumask
 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
 */
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
{
	int ret = -EINVAL;

	/*
	 * Not excluding isolated cpus on purpose.
	 * If the user wishes to include them, we allow that.
	 */
	cpumask_and(cpumask, cpumask, cpu_possible_mask);
	if (!cpumask_empty(cpumask)) {
		apply_wqattrs_lock();
		/* no-op if the requested mask is already in effect */
		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
			ret = 0;
			goto out_unlock;
		}

		ret = workqueue_apply_unbound_cpumask(cpumask);

out_unlock:
		apply_wqattrs_unlock();
	}

	return ret;
}
/* Map @val to an index into wq_affn_names[]; -EINVAL if nothing matches. */
static int parse_affn_scope(const char *val)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(wq_affn_names); idx++) {
		const char *name = wq_affn_names[idx];

		if (!strncasecmp(val, name, strlen(name)))
			return idx;
	}

	return -EINVAL;
}
/*
 * "default_affinity_scope" module param setter: update wq_affn_dfl and
 * re-evaluate pod assignment for every workqueue on every online CPU.
 */
static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
{
	struct workqueue_struct *wq;
	int affn, cpu;

	affn = parse_affn_scope(val);
	if (affn < 0)
		return affn;
	/* "default" cannot itself be the default scope */
	if (affn == WQ_AFFN_DFL)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&wq_pool_mutex);

	wq_affn_dfl = affn;

	list_for_each_entry(wq, &workqueues, list) {
		for_each_online_cpu(cpu) {
			wq_update_pod(wq, cpu, cpu, true);
		}
	}

	mutex_unlock(&wq_pool_mutex);
	cpus_read_unlock();

	return 0;
}
/* "default_affinity_scope" module param getter. */
static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
{
	const char *name = wq_affn_names[wq_affn_dfl];

	return scnprintf(buffer, PAGE_SIZE, "%s\n", name);
}
/* Expose "workqueue.default_affinity_scope" as a runtime module parameter. */
static const struct kernel_param_ops wq_affn_dfl_ops = {
	.set	= wq_affn_dfl_set,
	.get	= wq_affn_dfl_get,
};

module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
#ifdef CONFIG_SYSFS
/*
* Workqueues with WQ_SYSFS flag set is visible to userland via
* /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
* following attributes.
*
* per_cpu RO bool : whether the workqueue is per-cpu or unbound
* max_active RW int : maximum number of in-flight work items
*
* Unbound workqueues have the following extra attributes.
*
* nice RW int : nice value of the workers
* cpumask RW mask : bitmask of allowed CPUs for the workers
* affinity_scope RW str : worker CPU affinity scope (cache, numa, none)
* affinity_strict RW bool : worker CPU affinity is strict
*/
/*
 * Glue binding a workqueue to its sysfs device.  Allocated in
 * workqueue_sysfs_register() and freed by wq_device_release().
 */
struct wq_device {
	struct workqueue_struct		*wq;	/* workqueue being exposed */
	struct device			dev;	/* embedded device on wq_subsys */
};
/* Resolve a wq_subsys device back to its workqueue. */
static struct workqueue_struct *dev_to_wq(struct device *dev)
{
	return container_of(dev, struct wq_device, dev)->wq;
}
static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
}
static DEVICE_ATTR_RO(per_cpu);
static ssize_t max_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
}
static ssize_t max_active_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int val;
if (sscanf(buf, "%d", &val) != 1 || val <= 0)
return -EINVAL;
workqueue_set_max_active(wq, val);
return count;
}
static DEVICE_ATTR_RW(max_active);
/* Attributes created for every WQ_SYSFS workqueue, per-cpu or unbound. */
static struct attribute *wq_sysfs_attrs[] = {
	&dev_attr_per_cpu.attr,
	&dev_attr_max_active.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wq_sysfs);
/* sysfs "nice": worker nice level of an unbound workqueue. */
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	ssize_t len;

	mutex_lock(&wq->mutex);
	len = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
	mutex_unlock(&wq->mutex);

	return len;
}
/*
 * Prepare workqueue_attrs for sysfs store operations: hand back a mutable
 * copy of @wq's current unbound attrs.  Caller owns (and frees) the copy.
 */
static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
{
	struct workqueue_attrs *copy;

	lockdep_assert_held(&wq_pool_mutex);

	copy = alloc_workqueue_attrs();
	if (copy)
		copy_workqueue_attrs(copy, wq->unbound_attrs);

	return copy;
}
/* sysfs "nice" store: apply a new nice level within [MIN_NICE, MAX_NICE]. */
static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	apply_wqattrs_lock();
	attrs = wq_sysfs_prep_attrs(wq);
	if (attrs) {
		if (sscanf(buf, "%d", &attrs->nice) == 1 &&
		    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
			ret = apply_workqueue_attrs_locked(wq, attrs);
		else
			ret = -EINVAL;
	}
	apply_wqattrs_unlock();

	free_workqueue_attrs(attrs);
	return ret ?: count;
}
/* sysfs "cpumask": allowed CPUs of an unbound workqueue's workers. */
static ssize_t wq_cpumask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	ssize_t len;

	mutex_lock(&wq->mutex);
	len = scnprintf(buf, PAGE_SIZE, "%*pb\n",
			cpumask_pr_args(wq->unbound_attrs->cpumask));
	mutex_unlock(&wq->mutex);

	return len;
}
/* sysfs "cpumask" store: parse and apply a new worker cpumask. */
static ssize_t wq_cpumask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	apply_wqattrs_lock();
	attrs = wq_sysfs_prep_attrs(wq);
	if (attrs) {
		ret = cpumask_parse(buf, attrs->cpumask);
		if (!ret)
			ret = apply_workqueue_attrs_locked(wq, attrs);
	}
	apply_wqattrs_unlock();

	free_workqueue_attrs(attrs);
	return ret ?: count;
}
/* sysfs "affinity_scope": "default" also reports what it resolves to. */
static ssize_t wq_affn_scope_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int written;

	mutex_lock(&wq->mutex);
	if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
		/* e.g. "default (cache)" */
		written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
				    wq_affn_names[WQ_AFFN_DFL],
				    wq_affn_names[wq_affn_dfl]);
	else
		written = scnprintf(buf, PAGE_SIZE, "%s\n",
				    wq_affn_names[wq->unbound_attrs->affn_scope]);
	mutex_unlock(&wq->mutex);

	return written;
}
/* sysfs "affinity_scope" store: parse a scope name and apply it. */
static ssize_t wq_affn_scope_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int affn, ret = -ENOMEM;

	affn = parse_affn_scope(buf);
	if (affn < 0)
		return affn;

	apply_wqattrs_lock();
	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		goto out_unlock;

	attrs->affn_scope = affn;
	ret = apply_workqueue_attrs_locked(wq, attrs);
out_unlock:
	apply_wqattrs_unlock();
	free_workqueue_attrs(attrs);
	return ret ?: count;
}
static ssize_t wq_affinity_strict_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n",
wq->unbound_attrs->affn_strict);
}
/* sysfs "affinity_strict" store: any integer; nonzero enables strictness. */
static ssize_t wq_affinity_strict_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int v, ret = -ENOMEM;

	if (sscanf(buf, "%d", &v) != 1)
		return -EINVAL;

	apply_wqattrs_lock();
	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		goto out_unlock;

	attrs->affn_strict = (bool)v;
	ret = apply_workqueue_attrs_locked(wq, attrs);
out_unlock:
	apply_wqattrs_unlock();
	free_workqueue_attrs(attrs);
	return ret ?: count;
}
/* Extra attributes created only for WQ_UNBOUND workqueues. */
static struct device_attribute wq_sysfs_unbound_attrs[] = {
	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
	__ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
	__ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
	__ATTR_NULL,
};

/* /sys/bus/workqueue */
static struct bus_type wq_subsys = {
	.name				= "workqueue",
	.dev_groups			= wq_sysfs_groups,
};
/* Root-device "cpumask": the global unbound-workqueue cpumask. */
static ssize_t wq_unbound_cpumask_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	ssize_t len;

	mutex_lock(&wq_pool_mutex);
	len = scnprintf(buf, PAGE_SIZE, "%*pb\n",
			cpumask_pr_args(wq_unbound_cpumask));
	mutex_unlock(&wq_pool_mutex);

	return len;
}
/* Root-device "cpumask" store: parse and apply a new global unbound mask. */
static ssize_t wq_unbound_cpumask_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	cpumask_var_t mask;
	int ret;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = cpumask_parse(buf, mask);
	if (!ret)
		ret = workqueue_set_unbound_cpumask(mask);

	free_cpumask_var(mask);
	return ret ?: count;
}
/* "cpumask" file on the workqueue subsystem's root device. */
static struct device_attribute wq_sysfs_cpumask_attr =
	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
	       wq_unbound_cpumask_store);
/* Register /sys/bus/workqueue and its root-level "cpumask" attribute. */
static int __init wq_sysfs_init(void)
{
	struct device *dev_root;
	int err;

	err = subsys_virtual_register(&wq_subsys, NULL);
	if (err)
		return err;

	dev_root = bus_get_dev_root(&wq_subsys);
	if (dev_root) {
		err = device_create_file(dev_root, &wq_sysfs_cpumask_attr);
		put_device(dev_root);	/* drop ref from bus_get_dev_root() */
	}
	return err;
}
core_initcall(wq_sysfs_init);
/* device release callback: frees the containing wq_device. */
static void wq_device_release(struct device *dev)
{
	kfree(container_of(dev, struct wq_device, dev));
}
/**
 * workqueue_sysfs_register - make a workqueue visible in sysfs
 * @wq: the workqueue to register
 *
 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
 * which is the preferred method.
 *
 * Workqueue user should use this function directly iff it wants to apply
 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
 * apply_workqueue_attrs() may race against userland updating the
 * attributes.
 *
 * Return: 0 on success, -errno on failure.
 */
int workqueue_sysfs_register(struct workqueue_struct *wq)
{
	struct wq_device *wq_dev;
	int ret;

	/*
	 * Adjusting max_active or creating new pwqs by applying
	 * attributes breaks ordering guarantee.  Disallow exposing ordered
	 * workqueues.
	 */
	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
		return -EINVAL;

	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
	if (!wq_dev)
		return -ENOMEM;

	wq_dev->wq = wq;
	wq_dev->dev.bus = &wq_subsys;
	wq_dev->dev.release = wq_device_release;
	dev_set_name(&wq_dev->dev, "%s", wq->name);

	/*
	 * unbound_attrs are created separately.  Suppress uevent until
	 * everything is ready.
	 */
	dev_set_uevent_suppress(&wq_dev->dev, true);

	ret = device_register(&wq_dev->dev);
	if (ret) {
		/* after device_register() failure, put_device() frees wq_dev */
		put_device(&wq_dev->dev);
		wq->wq_dev = NULL;
		return ret;
	}

	if (wq->flags & WQ_UNBOUND) {
		struct device_attribute *attr;

		/* wq_sysfs_unbound_attrs is __ATTR_NULL terminated */
		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
			ret = device_create_file(&wq_dev->dev, attr);
			if (ret) {
				device_unregister(&wq_dev->dev);
				wq->wq_dev = NULL;
				return ret;
			}
		}
	}

	dev_set_uevent_suppress(&wq_dev->dev, false);
	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
	return 0;
}
/**
 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
 * @wq: the workqueue to unregister
 *
 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
 */
static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
{
	struct wq_device *wq_dev = wq->wq_dev;

	if (!wq_dev)
		return;

	wq->wq_dev = NULL;
	device_unregister(&wq_dev->dev);
}
#else /* CONFIG_SYSFS */
/* !CONFIG_SYSFS: registration is compiled out; nothing to undo. */
static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
#endif /* CONFIG_SYSFS */
/*
* Workqueue watchdog.
*
* Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
* flush dependency, a concurrency managed work item which stays RUNNING
* indefinitely. Workqueue stalls can be very difficult to debug as the
* usual warning mechanisms don't trigger and internal workqueue state is
* largely opaque.
*
* Workqueue watchdog monitors all worker pools periodically and dumps
* state if some pools failed to make forward progress for a while where
* forward progress is defined as the first item on ->worklist changing.
*
* This mechanism is controlled through the kernel parameter
* "workqueue.watchdog_thresh" which can be updated at runtime through the
* corresponding sysfs parameter file.
*/
#ifdef CONFIG_WQ_WATCHDOG
/* stall threshold in seconds; 0 disables the watchdog */
static unsigned long wq_watchdog_thresh = 30;
static struct timer_list wq_watchdog_timer;

/* last global / per-cpu "forward progress" timestamps in jiffies */
static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;

/* BUG() after this many stall detections; 0 disables panicking */
static unsigned int wq_panic_on_stall;
module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
/*
 * Show workers that might prevent the processing of pending work items.
 * The only candidates are CPU-bound workers in the running state.
 * Pending work items should be handled by another idle worker
 * in all other situations.
 */
static void show_cpu_pool_hog(struct worker_pool *pool)
{
	struct worker *worker;
	unsigned long flags;
	int bkt;

	raw_spin_lock_irqsave(&pool->lock, flags);

	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
		if (task_is_running(worker->task)) {
			/*
			 * Defer printing to avoid deadlocks in console
			 * drivers that queue work while holding locks
			 * also taken in their write paths.
			 */
			printk_deferred_enter();

			pr_info("pool %d:\n", pool->id);
			sched_show_task(worker->task);

			printk_deferred_exit();
		}
	}

	raw_spin_unlock_irqrestore(&pool->lock, flags);
}
static void show_cpu_pools_hogs(void)
{
struct worker_pool *pool;
int pi;
pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
rcu_read_lock();
for_each_pool(pool, pi) {
if (pool->cpu_stall)
show_cpu_pool_hog(pool);
}
rcu_read_unlock();
}
/* BUG() once the number of detected stalls reaches wq_panic_on_stall. */
static void panic_on_wq_watchdog(void)
{
	static unsigned int wq_stall;

	if (!wq_panic_on_stall)
		return;

	wq_stall++;
	BUG_ON(wq_stall >= wq_panic_on_stall);
}
/* Mark the global and every possible CPU's timestamp as freshly touched. */
static void wq_watchdog_reset_touched(void)
{
	int cpu;

	wq_watchdog_touched = jiffies;
	for_each_possible_cpu(cpu)
		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
}
/*
 * Periodic watchdog: flag any pool whose worklist head hasn't advanced
 * (and which hasn't been touched) within the threshold, dump state and
 * optionally panic, then rearm.
 */
static void wq_watchdog_timer_fn(struct timer_list *unused)
{
	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
	bool lockup_detected = false;
	bool cpu_pool_stall = false;
	unsigned long now = jiffies;
	struct worker_pool *pool;
	int pi;

	/* thresh == 0 means the watchdog is disabled */
	if (!thresh)
		return;

	rcu_read_lock();

	for_each_pool(pool, pi) {
		unsigned long pool_ts, touched, ts;

		pool->cpu_stall = false;
		/* an empty worklist cannot stall */
		if (list_empty(&pool->worklist))
			continue;

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a stall.
		 */
		kvm_check_and_clear_guest_paused();

		/* get the latest of pool and touched timestamps */
		if (pool->cpu >= 0)
			touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
		else
			touched = READ_ONCE(wq_watchdog_touched);
		pool_ts = READ_ONCE(pool->watchdog_ts);

		if (time_after(pool_ts, touched))
			ts = pool_ts;
		else
			ts = touched;

		/* did we stall? */
		if (time_after(now, ts + thresh)) {
			lockup_detected = true;
			/* CPU-bound pools get their hogs dumped separately */
			if (pool->cpu >= 0) {
				pool->cpu_stall = true;
				cpu_pool_stall = true;
			}
			pr_emerg("BUG: workqueue lockup - pool");
			pr_cont_pool_info(pool);
			pr_cont(" stuck for %us!\n",
				jiffies_to_msecs(now - pool_ts) / 1000);
			trace_android_vh_wq_lockup_pool(pool->cpu, pool_ts);
		}
	}

	rcu_read_unlock();

	if (lockup_detected)
		show_all_workqueues();

	if (cpu_pool_stall)
		show_cpu_pools_hogs();

	if (lockup_detected)
		panic_on_wq_watchdog();

	wq_watchdog_reset_touched();
	mod_timer(&wq_watchdog_timer, jiffies + thresh);
}
/* Record forward progress on @cpu so the watchdog doesn't fire spuriously. */
notrace void wq_watchdog_touch(int cpu)
{
	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
	unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
	unsigned long now = jiffies;

	if (cpu >= 0)
		per_cpu(wq_watchdog_touched_cpu, cpu) = now;
	else
		WARN_ONCE(1, "%s should be called with valid CPU", __func__);

	/* Don't unnecessarily store to global cacheline */
	if (time_after(now, touch_ts + thresh / 4))
		WRITE_ONCE(wq_watchdog_touched, jiffies);
}
/* Change the watchdog threshold; @thresh == 0 leaves it disabled. */
static void wq_watchdog_set_thresh(unsigned long thresh)
{
	/* disable first so a concurrently-running timer fn sees thresh == 0 */
	wq_watchdog_thresh = 0;
	del_timer_sync(&wq_watchdog_timer);

	if (thresh) {
		wq_watchdog_thresh = thresh;
		wq_watchdog_reset_touched();
		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
	}
}
/* "watchdog_thresh" module param setter; defers timer setup before init. */
static int wq_watchdog_param_set_thresh(const char *val,
					const struct kernel_param *kp)
{
	unsigned long thresh;
	int ret = kstrtoul(val, 0, &thresh);

	if (ret)
		return ret;

	if (system_wq)
		wq_watchdog_set_thresh(thresh);
	else
		wq_watchdog_thresh = thresh;

	return 0;
}
/* Expose "workqueue.watchdog_thresh" as a runtime module parameter. */
static const struct kernel_param_ops wq_watchdog_thresh_ops = {
	.set	= wq_watchdog_param_set_thresh,
	.get	= param_get_ulong,
};

module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
		0644);
static void wq_watchdog_init(void)
{
	/* deferrable: don't wake an idle CPU just to check for stalls */
	timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
	wq_watchdog_set_thresh(wq_watchdog_thresh);
}
#else /* CONFIG_WQ_WATCHDOG */
/* !CONFIG_WQ_WATCHDOG stub */
static inline void wq_watchdog_init(void) { }
#endif /* CONFIG_WQ_WATCHDOG */
/* Intersect wq_unbound_cpumask with @mask unless that would empty it. */
static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
{
	if (cpumask_intersects(wq_unbound_cpumask, mask)) {
		cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
		return;
	}

	pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
		cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
}
/**
 * workqueue_init_early - early init for workqueue subsystem
 *
 * This is the first step of three-staged workqueue subsystem initialization and
 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are
 * up. It sets up all the data structures and system workqueues and allows early
 * boot code to create workqueues and queue/cancel work items. Actual work item
 * execution starts only after kthreads can be created and scheduled right
 * before early initcalls.
 */
void __init workqueue_init_early(void)
{
	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
	int i, cpu;

	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));

	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
	/* start from all possible CPUs, then apply the boot-time restrictions */
	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
	restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
	restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
	if (!cpumask_empty(&wq_cmdline_cpumask))
		restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);

	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);

	wq_update_pod_attrs_buf = alloc_workqueue_attrs();
	BUG_ON(!wq_update_pod_attrs_buf);

	/* initialize WQ_AFFN_SYSTEM pods: a single pod spanning all CPUs */
	pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
	pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
	BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);

	BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));

	pt->nr_pods = 1;
	cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
	pt->pod_node[0] = NUMA_NO_NODE;
	pt->cpu_pod[0] = 0;

	/* initialize CPU pools: one normal and one highpri pool per CPU */
	for_each_possible_cpu(cpu) {
		struct worker_pool *pool;

		i = 0;
		for_each_cpu_worker_pool(pool, cpu) {
			BUG_ON(init_worker_pool(pool));
			pool->cpu = cpu;
			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
			cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
			pool->attrs->nice = std_nice[i++];
			pool->attrs->affn_strict = true;
			pool->node = cpu_to_node(cpu);

			/* alloc pool ID */
			mutex_lock(&wq_pool_mutex);
			BUG_ON(worker_pool_assign_id(pool));
			mutex_unlock(&wq_pool_mutex);
		}
	}

	/* create default unbound and ordered wq attrs */
	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
		struct workqueue_attrs *attrs;

		BUG_ON(!(attrs = alloc_workqueue_attrs()));
		attrs->nice = std_nice[i];
		unbound_std_wq_attrs[i] = attrs;

		/*
		 * An ordered wq should have only one pwq as ordering is
		 * guaranteed by max_active which is enforced by pwqs.
		 */
		BUG_ON(!(attrs = alloc_workqueue_attrs()));
		attrs->nice = std_nice[i];
		attrs->ordered = true;
		ordered_wq_attrs[i] = attrs;
	}

	/* the system_*_wq workqueues used throughout the kernel */
	system_wq = alloc_workqueue("events", 0, 0);
	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
					      WQ_POWER_EFFICIENT, 0);
	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
					      0);
	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
	       !system_unbound_wq || !system_freezable_wq ||
	       !system_power_efficient_wq ||
	       !system_freezable_power_efficient_wq);
}
/* Create the pwq release worker and pick a default CPU-intensive threshold. */
static void __init wq_cpu_intensive_thresh_init(void)
{
	unsigned long thresh;
	unsigned long bogo;

	pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
	BUG_ON(IS_ERR(pwq_release_worker));

	/* if the user set it to a specific value, keep it */
	if (wq_cpu_intensive_thresh_us != ULONG_MAX)
		return;

	/*
	 * The default of 10ms is derived from the fact that most modern (as of
	 * 2023) processors can do a lot in 10ms and that it's just below what
	 * most consider human-perceivable. However, the kernel also runs on a
	 * lot slower CPUs including microcontrollers where the threshold is way
	 * too low.
	 *
	 * Let's scale up the threshold up to 1 second if BogoMips is below 4000.
	 * This is by no means accurate but it doesn't have to be. The mechanism
	 * is still useful even when the threshold is fully scaled up. Also, as
	 * the reports would usually be applicable to everyone, some machines
	 * operating on longer thresholds won't significantly diminish their
	 * usefulness.
	 */
	thresh = 10 * USEC_PER_MSEC;

	/* see init/calibrate.c for lpj -> BogoMIPS calculation */
	bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
	if (bogo < 4000)
		thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);

	pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
		 loops_per_jiffy, bogo, thresh);

	wq_cpu_intensive_thresh_us = thresh;
}
/**
 * workqueue_init - bring workqueue subsystem fully online
 *
 * This is the second step of three-staged workqueue subsystem initialization
 * and invoked as soon as kthreads can be created and scheduled. Workqueues have
 * been created and work items queued on them, but there are no kworkers
 * executing the work items yet. Populate the worker pools with the initial
 * workers and enable future kworker creations.
 */
void __init workqueue_init(void)
{
	struct workqueue_struct *wq;
	struct worker_pool *pool;
	int cpu, bkt;

	wq_cpu_intensive_thresh_init();

	mutex_lock(&wq_pool_mutex);

	/*
	 * Per-cpu pools created earlier could be missing node hint. Fix them
	 * up. Also, create a rescuer for workqueues that requested it.
	 */
	for_each_possible_cpu(cpu) {
		for_each_cpu_worker_pool(pool, cpu) {
			pool->node = cpu_to_node(cpu);
		}
	}

	list_for_each_entry(wq, &workqueues, list) {
		WARN(init_rescuer(wq),
		     "workqueue: failed to create early rescuer for %s",
		     wq->name);
	}

	mutex_unlock(&wq_pool_mutex);

	/* create the initial workers */
	for_each_online_cpu(cpu) {
		for_each_cpu_worker_pool(pool, cpu) {
			/* online CPUs get associated (non-DISASSOCIATED) pools */
			pool->flags &= ~POOL_DISASSOCIATED;
			BUG_ON(!create_worker(pool));
		}
	}

	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
		BUG_ON(!create_worker(pool));

	wq_online = true;
	wq_watchdog_init();
}
/*
 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
 * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
 * and consecutive pod ID. The rest of @pt is initialized accordingly.
 */
static void __init init_pod_type(struct wq_pod_type *pt,
				 bool (*cpus_share_pod)(int, int))
{
	int cur, pre, cpu, pod;

	pt->nr_pods = 0;

	/* init @pt->cpu_pod[] according to @cpus_share_pod() */
	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
	BUG_ON(!pt->cpu_pod);

	for_each_possible_cpu(cur) {
		for_each_possible_cpu(pre) {
			/* no earlier CPU shares a pod with @cur: new pod */
			if (pre >= cur) {
				pt->cpu_pod[cur] = pt->nr_pods++;
				break;
			}
			/* @cur joins the pod of the first sharing CPU */
			if (cpus_share_pod(cur, pre)) {
				pt->cpu_pod[cur] = pt->cpu_pod[pre];
				break;
			}
		}
	}

	/* init the rest to match @pt->cpu_pod[] */
	pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
	pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
	BUG_ON(!pt->pod_cpus || !pt->pod_node);

	for (pod = 0; pod < pt->nr_pods; pod++)
		BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));

	for_each_possible_cpu(cpu) {
		cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
		pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
	}
}
/* WQ_AFFN_CPU: no sharing - every CPU is its own pod. */
static bool __init cpus_dont_share(int cpu0, int cpu1)
{
	return false;
}
/* WQ_AFFN_SMT: CPUs share a pod iff they are SMT siblings. */
static bool __init cpus_share_smt(int cpu0, int cpu1)
{
#ifdef CONFIG_SCHED_SMT
	return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
#else
	/* without SMT scheduling support, treat every CPU as its own core */
	return false;
#endif
}
/* WQ_AFFN_NUMA: CPUs share a pod iff they sit on the same NUMA node. */
static bool __init cpus_share_numa(int cpu0, int cpu1)
{
	int node0 = cpu_to_node(cpu0);
	int node1 = cpu_to_node(cpu1);

	return node0 == node1;
}
/**
 * workqueue_init_topology - initialize CPU pods for unbound workqueues
 *
 * This is the third step of three-staged workqueue subsystem initialization and
 * invoked after SMP and topology information are fully initialized. It
 * initializes the unbound CPU pods accordingly.
 */
void __init workqueue_init_topology(void)
{
	struct workqueue_struct *wq;
	int cpu;

	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);

	mutex_lock(&wq_pool_mutex);

	/*
	 * Workqueues allocated earlier would have all CPUs sharing the default
	 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
	 * combinations to apply per-pod sharing.
	 */
	list_for_each_entry(wq, &workqueues, list) {
		for_each_online_cpu(cpu) {
			wq_update_pod(wq, cpu, cpu, true);
		}
	}

	mutex_unlock(&wq_pool_mutex);
}
/* Warn with a backtrace when a system-wide workqueue is flushed. */
void __warn_flushing_systemwide_wq(void)
{
	pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
	dump_stack();
}
EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
/* Parse the "workqueue.unbound_cpus=" boot parameter into wq_cmdline_cpumask. */
static int __init workqueue_unbound_cpus_setup(char *str)
{
	int ret = cpulist_parse(str, &wq_cmdline_cpumask);

	if (ret < 0) {
		cpumask_clear(&wq_cmdline_cpumask);
		pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
	}

	return 1;
}
__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);