This is the 6.6.54 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmb/+/0ACgkQONu9yGCS
 aT7VXg/9FNFZNtZPGpXCob/PXSXwgUxajbFwIne5fr57Ehstj2RXwAOOkVT0ED8l
 yyuLxketJum4qVfkb1DmH60g4+bS8iBjXsa5ouL1WXVTQEE+vZDM6rhtEyAUw3RW
 h92zfPNh8d/GzGt91AH8gCfVusM1bRSQtsdG4QTFgncFP2WEmDEz0PP2Wpyy6krq
 XESiBUfKeYPe41NQ5tyfahtg5QsvA7m/JOsiXF4EeY9isU76xAUv+g6xiCRJ6grH
 14lVDf09I77WB6O26Py5+mNeO0KA1hNzHCWSpkax4hZ9RAd02RHIUAbmU9W77VLG
 XCy68VI0dJegXJN2oDyRB1naBEthn9zgcNYNLUBy0cHcP8qmDM+x4J2tXi4TW1vq
 CpIRLQbgU9IAjC5j+DAdknP8cZP19qNkGZ8niPO5NPDpAHwoimhpFPBrU/+EofS1
 PUgpdghsL3arTw2LnqAPQ8/8UoLTViUjbt/VU4+zoOaRsIoM9e9ivzZqyEDMBiR5
 7/JyXG2iaF7h4CHeIc/jNUFxTXRrjYSr16qa8PHPb8Ae5EZ2+9Eyb5sdI1oy6vEy
 9HR4AdKHdvYCmL4Fx/FMaaXyKWE6U/Pg1B/nGLx4c0cULgkAvawlVNrI/X8+H6F4
 x+8P52/7LIFolcCbvUyurlQRY3WUDFqreD90+r1dGt66Z5czQ/E=
 =yW8f
 -----END PGP SIGNATURE-----

Merge 6.6.54 into android15-6.6-lts

Changes in 6.6.54
	EDAC/synopsys: Fix ECC status and IRQ control race condition
	EDAC/synopsys: Fix error injection on Zynq UltraScale+
	wifi: rtw88: always wait for both firmware loading attempts
	crypto: xor - fix template benchmarking
	ACPI: PMIC: Remove unneeded check in tps68470_pmic_opregion_probe()
	wifi: brcmfmac: export firmware interface functions
	wifi: brcmfmac: introducing fwil query functions
	wifi: ath9k: Remove error checks when creating debugfs entries
	wifi: ath12k: fix BSS chan info request WMI command
	wifi: ath12k: match WMI BSS chan info structure with firmware definition
	wifi: ath12k: fix invalid AMPDU factor calculation in ath12k_peer_assoc_h_he()
	net: stmmac: dwmac-loongson: Init ref and PTP clocks rate
	arm64: signal: Fix some under-bracketed UAPI macros
	wifi: rtw88: remove CPT execution branch never used
	RISC-V: KVM: Fix sbiret init before forwarding to userspace
	RISC-V: KVM: Allow legacy PMU access from guest
	RISC-V: KVM: Fix to allow hpmcounter31 from the guest
	mount: handle OOM on mnt_warn_timestamp_expiry
	ARM: 9410/1: vfp: Use asm volatile in fmrx/fmxr macros
	powercap: intel_rapl: Fix off by one in get_rpi()
	kselftest/arm64: signal: fix/refactor SVE vector length enumeration
	drivers/perf: Fix ali_drw_pmu driver interrupt status clearing
	wifi: mac80211: don't use rate mask for offchannel TX either
	wifi: iwlwifi: remove AX101, AX201 and AX203 support from LNL
	wifi: iwlwifi: config: label 'gl' devices as discrete
	wifi: iwlwifi: mvm: increase the time between ranging measurements
	padata: Honor the caller's alignment in case of chunk_size 0
	drivers/perf: hisi_pcie: Record hardware counts correctly
	drivers/perf: hisi_pcie: Fix TLP headers bandwidth counting
	kselftest/arm64: Actually test SME vector length changes via sigreturn
	can: j1939: use correct function name in comment
	ACPI: CPPC: Fix MASK_VAL() usage
	netfilter: nf_tables: elements with timeout below CONFIG_HZ never expire
	netfilter: nf_tables: reject element expiration with no timeout
	netfilter: nf_tables: reject expiration higher than timeout
	netfilter: nf_tables: remove annotation to access set timeout while holding lock
	perf/arm-cmn: Rework DTC counters (again)
	perf/arm-cmn: Improve debugfs pretty-printing for large configs
	perf/arm-cmn: Refactor node ID handling. Again.
	perf/arm-cmn: Fix CCLA register offset
	perf/arm-cmn: Ensure dtm_idx is big enough
	cpufreq: ti-cpufreq: Introduce quirks to handle syscon fails appropriately
	wifi: mt76: mt7915: fix oops on non-dbdc mt7986
	wifi: mt76: mt7996: use hweight16 to get correct tx antenna
	wifi: mt76: mt7996: fix traffic delay when switching back to working channel
	wifi: mt76: mt7996: fix wmm set of station interface to 3
	wifi: mt76: mt7996: fix HE and EHT beamforming capabilities
	wifi: mt76: mt7996: fix EHT beamforming capability check
	x86/sgx: Fix deadlock in SGX NUMA node search
	pm:cpupower: Add missing powercap_set_enabled() stub function
	crypto: hisilicon/hpre - mask cluster timeout error
	crypto: hisilicon/qm - reset device before enabling it
	crypto: hisilicon/qm - inject error before stopping queue
	wifi: mt76: mt7603: fix mixed declarations and code
	wifi: cfg80211: fix UBSAN noise in cfg80211_wext_siwscan()
	wifi: mt76: mt7915: fix rx filter setting for bfee functionality
	wifi: mt76: mt7996: ensure 4-byte alignment for beacon commands
	wifi: mt76: mt7996: fix uninitialized TLV data
	wifi: cfg80211: fix two more possible UBSAN-detected off-by-one errors
	wifi: mac80211: use two-phase skb reclamation in ieee80211_do_stop()
	wifi: wilc1000: fix potential RCU dereference issue in wilc_parse_join_bss_param
	Bluetooth: hci_core: Fix sending MGMT_EV_CONNECT_FAILED
	Bluetooth: hci_sync: Ignore errors from HCI_OP_REMOTE_NAME_REQ_CANCEL
	sock_map: Add a cond_resched() in sock_hash_free()
	can: bcm: Clear bo->bcm_proc_read after remove_proc_entry().
	can: m_can: enable NAPI before enabling interrupts
	can: m_can: m_can_close(): stop clocks after device has been shut down
	Bluetooth: btusb: Fix not handling ZPL/short-transfer
	bareudp: Pull inner IP header in bareudp_udp_encap_recv().
	bareudp: Pull inner IP header on xmit.
	net: enetc: Use IRQF_NO_AUTOEN flag in request_irq()
	r8169: disable ALDPS per default for RTL8125
	net: ipv6: rpl_iptunnel: Fix memory leak in rpl_input
	net: tipc: avoid possible garbage value
	ipv6: avoid possible NULL deref in rt6_uncached_list_flush_dev()
	ublk: move zone report data out of request pdu
	nbd: fix race between timeout and normal completion
	block, bfq: fix possible UAF for bfqq->bic with merge chain
	block, bfq: choose the last bfqq from merge chain in bfq_setup_cooperator()
	block, bfq: don't break merge chain in bfq_split_bfqq()
	cachefiles: Fix non-taking of sb_writers around set/removexattr
	erofs: fix incorrect symlink detection in fast symlink
	block, bfq: fix uaf for accessing waker_bfqq after splitting
	block, bfq: fix procress reference leakage for bfqq in merge chain
	io_uring/io-wq: do not allow pinning outside of cpuset
	io_uring/io-wq: inherit cpuset of cgroup in io worker
	block: print symbolic error name instead of error code
	block: fix potential invalid pointer dereference in blk_add_partition
	spi: ppc4xx: handle irq_of_parse_and_map() errors
	arm64: dts: exynos: exynos7885-jackpotlte: Correct RAM amount to 4GB
	arm64: dts: mediatek: mt8186: Fix supported-hw mask for GPU OPPs
	firmware: arm_scmi: Fix double free in OPTEE transport
	spi: ppc4xx: Avoid returning 0 when failed to parse and map IRQ
	regulator: Return actual error in of_regulator_bulk_get_all()
	arm64: dts: renesas: r9a07g043u: Correct GICD and GICR sizes
	arm64: dts: renesas: r9a07g054: Correct GICD and GICR sizes
	arm64: dts: renesas: r9a07g044: Correct GICD and GICR sizes
	ARM: dts: microchip: sam9x60: Fix rtc/rtt clocks
	arm64: dts: rockchip: Correct vendor prefix for Hardkernel ODROID-M1
	arm64: dts: ti: k3-j721e-sk: Fix reversed C6x carveout locations
	arm64: dts: ti: k3-j721e-beagleboneai64: Fix reversed C6x carveout locations
	spi: bcmbca-hsspi: Fix missing pm_runtime_disable()
	ARM: dts: microchip: sama7g5: Fix RTT clock
	ARM: dts: imx7d-zii-rmu2: fix Ethernet PHY pinctrl property
	ARM: versatile: fix OF node leak in CPUs prepare
	reset: berlin: fix OF node leak in probe() error path
	reset: k210: fix OF node leak in probe() error path
	clocksource/drivers/qcom: Add missing iounmap() on errors in msm_dt_timer_init()
	arm64: dts: mediatek: mt8195: Correct clock order for dp_intf*
	x86/mm: Use IPIs to synchronize LAM enablement
	ASoC: rt5682s: Return devm_of_clk_add_hw_provider to transfer the error
	ASoC: tas2781: remove unused acpi_subysystem_id
	ASoC: tas2781: Use of_property_read_reg()
	ASoC: tas2781-i2c: Drop weird GPIO code
	ASoC: tas2781-i2c: Get the right GPIO line
	selftests/ftrace: Add required dependency for kprobe tests
	ALSA: hda: cs35l41: fix module autoloading
	m68k: Fix kernel_clone_args.flags in m68k_clone()
	ASoC: loongson: fix error release
	hwmon: (max16065) Fix overflows seen when writing limits
	hwmon: (max16065) Remove use of i2c_match_id()
	hwmon: (max16065) Fix alarm attributes
	mtd: slram: insert break after errors in parsing the map
	hwmon: (ntc_thermistor) fix module autoloading
	power: supply: axp20x_battery: Remove design from min and max voltage
	power: supply: max17042_battery: Fix SOC threshold calc w/ no current sense
	fbdev: hpfb: Fix an error handling path in hpfb_dio_probe()
	iommu/amd: Do not set the D bit on AMD v2 table entries
	mtd: powernv: Add check devm_kasprintf() returned value
	rcu/nocb: Fix RT throttling hrtimer armed from offline CPU
	mtd: rawnand: mtk: Use for_each_child_of_node_scoped()
	mtd: rawnand: mtk: Factorize out the logic cleaning mtk chips
	mtd: rawnand: mtk: Fix init error path
	iommu/arm-smmu-qcom: hide last LPASS SMMU context bank from linux
	iommu/arm-smmu-qcom: Work around SDM845 Adreno SMMU w/ 16K pages
	iommu/arm-smmu-qcom: apply num_context_bank fixes for SDM630 / SDM660
	pmdomain: core: Harden inter-column space in debug summary
	drm/stm: Fix an error handling path in stm_drm_platform_probe()
	drm/stm: ltdc: check memory returned by devm_kzalloc()
	drm/amd/display: Add null check for set_output_gamma in dcn30_set_output_transfer_func
	drm/amdgpu: properly handle vbios fake edid sizing
	drm/radeon: properly handle vbios fake edid sizing
	scsi: smartpqi: revert propagate-the-multipath-failure-to-SML-quickly
	scsi: NCR5380: Check for phase match during PDMA fixup
	drm/amd/amdgpu: Properly tune the size of struct
	drm/rockchip: vop: Allow 4096px width scaling
	drm/rockchip: dw_hdmi: Fix reading EDID when using a forced mode
	drm/radeon/evergreen_cs: fix int overflow errors in cs track offsets
	drm/bridge: lontium-lt8912b: Validate mode in drm_bridge_funcs::mode_valid()
	drm/vc4: hdmi: Handle error case of pm_runtime_resume_and_get
	scsi: elx: libefc: Fix potential use after free in efc_nport_vport_del()
	jfs: fix out-of-bounds in dbNextAG() and diAlloc()
	drm/mediatek: Fix missing configuration flags in mtk_crtc_ddp_config()
	drm/mediatek: Use spin_lock_irqsave() for CRTC event lock
	powerpc/8xx: Fix initial memory mapping
	powerpc/8xx: Fix kernel vs user address comparison
	powerpc/vdso: Inconditionally use CFUNC macro
	drm/msm: Fix incorrect file name output in adreno_request_fw()
	drm/msm/a5xx: disable preemption in submits by default
	drm/msm/a5xx: properly clear preemption records on resume
	drm/msm/a5xx: fix races in preemption evaluation stage
	drm/msm/a5xx: workaround early ring-buffer emptiness check
	ipmi: docs: don't advertise deprecated sysfs entries
	drm/msm/dsi: correct programming sequence for SM8350 / SM8450
	drm/msm: fix %s null argument error
	drivers:drm:exynos_drm_gsc:Fix wrong assignment in gsc_bind()
	xen: use correct end address of kernel for conflict checking
	HID: wacom: Support sequence numbers smaller than 16-bit
	HID: wacom: Do not warn about dropped packets for first packet
	ata: libata: Clear DID_TIME_OUT for ATA PT commands with sense data
	minmax: avoid overly complex min()/max() macro arguments in xen
	xen: introduce generic helper checking for memory map conflicts
	xen: move max_pfn in xen_memory_setup() out of function scope
	xen: add capability to remap non-RAM pages to different PFNs
	xen: tolerate ACPI NVS memory overlapping with Xen allocated memory
	xen/swiotlb: add alignment check for dma buffers
	xen/swiotlb: fix allocated size
	tpm: Clean up TPM space after command failure
	sched/fair: Make SCHED_IDLE entity be preempted in strict hierarchy
	selftests/bpf: Workaround strict bpf_lsm return value check.
	selftests/bpf: Fix error linking uprobe_multi on mips
	bpf: Use -Wno-error in certain tests when building with GCC
	bpf: Disable some `attribute ignored' warnings in GCC
	bpf: Temporarily define BPF_NO_PRESEVE_ACCESS_INDEX for GCC
	selftests/bpf: Add CFLAGS per source file and runner
	selftests/bpf: Fix wrong binary in Makefile log output
	tools/runqslower: Fix LDFLAGS and add LDLIBS support
	selftests/bpf: Use pid_t consistently in test_progs.c
	selftests/bpf: Fix compile error from rlim_t in sk_storage_map.c
	selftests/bpf: Fix error compiling bpf_iter_setsockopt.c with musl libc
	selftests/bpf: Implement get_hw_ring_size function to retrieve current and max interface size
	selftests/bpf: Drop unneeded error.h includes
	selftests/bpf: Fix missing ARRAY_SIZE() definition in bench.c
	selftests/bpf: Fix missing UINT_MAX definitions in benchmarks
	selftests/bpf: Fix missing BUILD_BUG_ON() declaration
	selftests/bpf: Replace CHECK with ASSERT_* in ns_current_pid_tgid test
	selftests/bpf: Refactor out some functions in ns_current_pid_tgid test
	selftests/bpf: Add a cgroup prog bpf_get_ns_current_pid_tgid() test
	selftests/bpf: Fix include of <sys/fcntl.h>
	selftests/bpf: Fix compiling parse_tcp_hdr_opt.c with musl-libc
	selftests/bpf: Fix compiling kfree_skb.c with musl-libc
	selftests/bpf: Fix compiling flow_dissector.c with musl-libc
	selftests/bpf: Fix compiling tcp_rtt.c with musl-libc
	selftests/bpf: Fix compiling core_reloc.c with musl-libc
	selftests/bpf: Fix errors compiling lwt_redirect.c with musl libc
	selftests/bpf: Fix errors compiling decap_sanity.c with musl libc
	selftests/bpf: Fix errors compiling cg_storage_multi.h with musl libc
	selftests/bpf: Fix arg parsing in veristat, test_progs
	selftests/bpf: Fix error compiling test_lru_map.c
	selftests/bpf: Fix C++ compile error from missing _Bool type
	selftests/bpf: Fix flaky selftest lwt_redirect/lwt_reroute
	selftests/bpf: Fix redefinition errors compiling lwt_reroute.c
	selftests/bpf: Fix compile if backtrace support missing in libc
	selftests/bpf: Fix error compiling tc_redirect.c with musl libc
	samples/bpf: Fix compilation errors with cf-protection option
	bpf: correctly handle malformed BPF_CORE_TYPE_ID_LOCAL relos
	xz: cleanup CRC32 edits from 2018
	kthread: fix task state in kthread worker if being frozen
	ext4: clear EXT4_GROUP_INFO_WAS_TRIMMED_BIT even mount with discard
	smackfs: Use rcu_assign_pointer() to ensure safe assignment in smk_set_cipso
	ext4: avoid buffer_head leak in ext4_mark_inode_used()
	ext4: avoid potential buffer_head leak in __ext4_new_inode()
	ext4: avoid negative min_clusters in find_group_orlov()
	ext4: return error on ext4_find_inline_entry
	ext4: avoid OOB when system.data xattr changes underneath the filesystem
	ext4: check stripe size compatibility on remount as well
	sched/numa: Document vma_numab_state fields
	sched/numa: Rename vma_numab_state::access_pids[] => ::pids_active[], ::next_pid_reset => ::pids_active_reset
	sched/numa: Trace decisions related to skipping VMAs
	sched/numa: Move up the access pid reset logic
	sched/numa: Complete scanning of partial VMAs regardless of PID activity
	sched/numa: Complete scanning of inactive VMAs when there is no alternative
	sched/numa: Fix the vma scan starving issue
	nilfs2: fix potential null-ptr-deref in nilfs_btree_insert()
	nilfs2: determine empty node blocks as corrupted
	nilfs2: fix potential oob read in nilfs_btree_check_delete()
	bpf: Fix bpf_strtol and bpf_strtoul helpers for 32bit
	bpf: Fix helper writes to read-only maps
	bpf: Improve check_raw_mode_ok test for MEM_UNINIT-tagged types
	bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error
	perf mem: Free the allocated sort string, fixing a leak
	perf inject: Fix leader sampling inserting additional samples
	perf annotate: Split branch stack cycles info from 'struct annotation'
	perf annotate: Move some source code related fields from 'struct annotation' to 'struct annotated_source'
	perf ui/browser/annotate: Use global annotation_options
	perf report: Fix --total-cycles --stdio output error
	perf sched timehist: Fix missing free of session in perf_sched__timehist()
	perf stat: Display iostat headers correctly
	perf sched timehist: Fixed timestamp error when unable to confirm event sched_in time
	perf time-utils: Fix 32-bit nsec parsing
	clk: imx: clk-audiomix: Correct parent clock for earc_phy and audpll
	clk: imx: imx6ul: fix default parent for enet*_ref_sel
	clk: imx: composite-8m: Less function calls in __imx8m_clk_hw_composite() after error detection
	clk: imx: composite-8m: Enable gate clk with mcore_booted
	clk: imx: composite-93: keep root clock on when mcore enabled
	clk: imx: composite-7ulp: Check the PCC present bit
	clk: imx: fracn-gppll: fix fractional part of PLL getting lost
	clk: imx: imx8mp: fix clock tree update of TF-A managed clocks
	clk: imx: imx8qxp: Register dc0_bypass0_clk before disp clk
	clk: imx: imx8qxp: Parent should be initialized earlier than the clock
	remoteproc: imx_rproc: Correct ddr alias for i.MX8M
	remoteproc: imx_rproc: Initialize workqueue earlier
	clk: rockchip: Set parent rate for DCLK_VOP clock on RK3228
	clk: qcom: dispcc-sm8550: fix several supposed typos
	clk: qcom: dispcc-sm8550: use rcg2_ops for mdss_dptx1_aux_clk_src
	clk: qcom: dispcc-sm8650: Update the GDSC flags
	clk: qcom: dispcc-sm8550: use rcg2_shared_ops for ESC RCGs
	leds: bd2606mvv: Fix device child node usage in bd2606mvv_probe()
	pinctrl: ti: ti-iodelay: Convert to platform remove callback returning void
	pinctrl: Use device_get_match_data()
	pinctrl: ti: iodelay: Use scope based of_node_put() cleanups
	pinctrl: ti: ti-iodelay: Fix some error handling paths
	Input: ilitek_ts_i2c - avoid wrong input subsystem sync
	Input: ilitek_ts_i2c - add report id message validation
	drivers: media: dvb-frontends/rtl2832: fix an out-of-bounds write error
	drivers: media: dvb-frontends/rtl2830: fix an out-of-bounds write error
	PCI: Wait for Link before restoring Downstream Buses
	firewire: core: correct range of block for case of switch statement
	PCI: keystone: Fix if-statement expression in ks_pcie_quirk()
	clk: qcom: ipq5332: Register gcc_qdss_tsctr_clk_src
	clk: qcom: dispcc-sm8250: use special function for Lucid 5LPE PLL
	leds: leds-pca995x: Add support for NXP PCA9956B
	leds: pca995x: Use device_for_each_child_node() to access device child nodes
	leds: pca995x: Fix device child node usage in pca995x_probe()
	x86/PCI: Check pcie_find_root_port() return for NULL
	nvdimm: Fix devs leaks in scan_labels()
	PCI: xilinx-nwl: Fix register misspelling
	PCI: xilinx-nwl: Clean up clock on probe failure/removal
	media: platform: rzg2l-cru: rzg2l-csi2: Add missing MODULE_DEVICE_TABLE
	RDMA/iwcm: Fix WARNING:at_kernel/workqueue.c:#check_flush_dependency
	pinctrl: single: fix missing error code in pcs_probe()
	clk: at91: sama7g5: Allocate only the needed amount of memory for PLLs
	media: mediatek: vcodec: Fix H264 multi stateless decoder smatch warning
	media: mediatek: vcodec: Fix VP8 stateless decoder smatch warning
	media: mediatek: vcodec: Fix H264 stateless decoder smatch warning
	RDMA/rtrs: Reset hb_missed_cnt after receiving other traffic from peer
	RDMA/rtrs-clt: Reset cid to con_num - 1 to stay in bounds
	clk: ti: dra7-atl: Fix leak of of_nodes
	clk: starfive: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync() usage
	clk: rockchip: rk3588: Fix 32k clock name for pmu_24m_32k_100m_src_p
	nfsd: remove unneeded EEXIST error check in nfsd_do_file_acquire
	nfsd: fix refcount leak when file is unhashed after being found
	pinctrl: mvebu: Fix devinit_dove_pinctrl_probe function
	IB/core: Fix ib_cache_setup_one error flow cleanup
	PCI: kirin: Fix buffer overflow in kirin_pcie_parse_port()
	RDMA/erdma: Return QP state in erdma_query_qp
	RDMA/mlx5: Limit usage of over-sized mkeys from the MR cache
	watchdog: imx_sc_wdt: Don't disable WDT in suspend
	RDMA/hns: Don't modify rq next block addr in HIP09 QPC
	RDMA/hns: Fix Use-After-Free of rsv_qp on HIP08
	RDMA/hns: Fix the overflow risk of hem_list_calc_ba_range()
	RDMA/hns: Fix spin_unlock_irqrestore() called with IRQs enabled
	RDMA/hns: Fix VF triggering PF reset in abnormal interrupt handler
	RDMA/hns: Fix 1bit-ECC recovery address in non-4K OS
	RDMA/hns: Optimize hem allocation performance
	RDMA/hns: Fix restricted __le16 degrades to integer issue
	RDMA/mlx5: Obtain upper net device only when needed
	riscv: Fix fp alignment bug in perf_callchain_user()
	RDMA/cxgb4: Added NULL check for lookup_atid
	RDMA/irdma: fix error message in irdma_modify_qp_roce()
	ntb: intel: Fix the NULL vs IS_ERR() bug for debugfs_create_dir()
	ntb_perf: Fix printk format
	ntb: Force physically contiguous allocation of rx ring buffers
	nfsd: call cache_put if xdr_reserve_space returns NULL
	nfsd: return -EINVAL when namelen is 0
	crypto: caam - Pad SG length when allocating hash edesc
	crypto: powerpc/p10-aes-gcm - Disable CRYPTO_AES_GCM_P10
	f2fs: atomic: fix to avoid racing w/ GC
	f2fs: reduce expensive checkpoint trigger frequency
	f2fs: fix to avoid racing in between read and OPU dio write
	f2fs: Create COW inode from parent dentry for atomic write
	f2fs: fix to wait page writeback before setting gcing flag
	f2fs: atomic: fix to truncate pagecache before on-disk metadata truncation
	f2fs: support .shutdown in f2fs_sops
	f2fs: fix to avoid use-after-free in f2fs_stop_gc_thread()
	f2fs: compress: do sanity check on cluster when CONFIG_F2FS_CHECK_FS is on
	f2fs: compress: don't redirty sparse cluster during {,de}compress
	f2fs: prevent atomic file from being dirtied before commit
	f2fs: clean up w/ dotdot_name
	f2fs: get rid of online repaire on corrupted directory
	f2fs: fix to don't set SB_RDONLY in f2fs_handle_critical_error()
	spi: atmel-quadspi: Undo runtime PM changes at driver exit time
	spi: spi-fsl-lpspi: Undo runtime PM changes at driver exit time
	lib/sbitmap: define swap_lock as raw_spinlock_t
	spi: atmel-quadspi: Avoid overwriting delay register settings
	nvme-multipath: system fails to create generic nvme device
	iio: adc: ad7606: fix oversampling gpio array
	iio: adc: ad7606: fix standby gpio state to match the documentation
	driver core: Fix error handling in driver API device_rename()
	ABI: testing: fix admv8818 attr description
	iio: chemical: bme680: Fix read/write ops to device by adding mutexes
	iio: magnetometer: ak8975: Convert enum->pointer for data in the match tables
	iio: magnetometer: ak8975: drop incorrect AK09116 compatible
	dt-bindings: iio: asahi-kasei,ak8975: drop incorrect AK09116 compatible
	driver core: Fix a potential null-ptr-deref in module_add_driver()
	serial: 8250: omap: Cleanup on error in request_irq
	coresight: tmc: sg: Do not leak sg_table
	interconnect: icc-clk: Add missed num_nodes initialization
	cxl/pci: Fix to record only non-zero ranges
	vhost_vdpa: assign irq bypass producer token correctly
	ep93xx: clock: Fix off by one in ep93xx_div_recalc_rate()
	Revert "dm: requeue IO if mapping table not yet available"
	net: xilinx: axienet: Schedule NAPI in two steps
	net: xilinx: axienet: Fix packet counting
	netfilter: nf_reject_ipv6: fix nf_reject_ip6_tcphdr_put()
	net: seeq: Fix use after free vulnerability in ether3 Driver Due to Race Condition
	net: ipv6: select DST_CACHE from IPV6_RPL_LWTUNNEL
	tcp: check skb is non-NULL in tcp_rto_delta_us()
	net: qrtr: Update packets cloning when broadcasting
	bonding: Fix unnecessary warnings and logs from bond_xdp_get_xmit_slave()
	virtio_net: Fix mismatched buf address when unmapping for small packets
	net: stmmac: set PP_FLAG_DMA_SYNC_DEV only if XDP is enabled
	netfilter: nf_tables: Keep deleted flowtable hooks until after RCU
	netfilter: ctnetlink: compile ctnetlink_label_size with CONFIG_NF_CONNTRACK_EVENTS
	netfilter: nf_tables: use rcu chain hook list iterator from netlink dump path
	io_uring/sqpoll: do not allow pinning outside of cpuset
	io_uring: check for presence of task_work rather than TIF_NOTIFY_SIGNAL
	mm: call the security_mmap_file() LSM hook in remap_file_pages()
	drm/amd/display: Fix Synaptics Cascaded Panamera DSC Determination
	drm/vmwgfx: Prevent unmapping active read buffers
	Revert "net: libwx: fix alloc msix vectors failed"
	xen: move checks for e820 conflicts further up
	xen: allow mapping ACPI data using a different physical address
	io_uring/sqpoll: retain test for whether the CPU is valid
	io_uring/sqpoll: do not put cpumask on stack
	Remove *.orig pattern from .gitignore
	PCI: Revert to the original speed after PCIe failed link retraining
	PCI: Clear the LBMS bit after a link retrain
	PCI: dra7xx: Fix threaded IRQ request for "dra7xx-pcie-main" IRQ
	PCI: imx6: Fix missing call to phy_power_off() in error handling
	PCI: Correct error reporting with PCIe failed link retraining
	PCI: Use an error code with PCIe failed link retraining
	PCI: xilinx-nwl: Fix off-by-one in INTx IRQ handler
	Revert "soc: qcom: smd-rpm: Match rpmsg channel instead of compatible"
	ASoC: rt5682: Return devm_of_clk_add_hw_provider to transfer the error
	soc: fsl: cpm1: tsa: Fix tsa_write8()
	soc: versatile: integrator: fix OF node leak in probe() error path
	Revert "media: tuners: fix error return code of hybrid_tuner_request_state()"
	iommufd: Protect against overflow of ALIGN() during iova allocation
	Input: adp5588-keys - fix check on return code
	Input: i8042 - add TUXEDO Stellaris 16 Gen5 AMD to i8042 quirk table
	Input: i8042 - add TUXEDO Stellaris 15 Slim Gen6 AMD to i8042 quirk table
	Input: i8042 - add another board name for TUXEDO Stellaris Gen5 AMD line
	KVM: arm64: Add memory length checks and remove inline in do_ffa_mem_xfer
	KVM: x86: Enforce x2APIC's must-be-zero reserved ICR bits
	KVM: x86: Move x2APIC ICR helper above kvm_apic_write_nodecode()
	KVM: Use dedicated mutex to protect kvm_usage_count to avoid deadlock
	drm/amd/display: Skip Recompute DSC Params if no Stream on Link
	drm/amd/display: Add HDMI DSC native YCbCr422 support
	drm/amd/display: Round calculated vtotal
	drm/amd/display: Validate backlight caps are sane
	KEYS: prevent NULL pointer dereference in find_asymmetric_key()
	powerpc/atomic: Use YZ constraints for DS-form instructions
	fs: Create a generic is_dot_dotdot() utility
	ksmbd: make __dir_empty() compatible with POSIX
	ksmbd: allow write with FILE_APPEND_DATA
	ksmbd: handle caseless file creation
	ata: libata-scsi: Fix ata_msense_control() CDL page reporting
	scsi: sd: Fix off-by-one error in sd_read_block_characteristics()
	scsi: ufs: qcom: Update MODE_MAX cfg_bw value
	scsi: mac_scsi: Revise printk(KERN_DEBUG ...) messages
	scsi: mac_scsi: Refactor polling loop
	scsi: mac_scsi: Disallow bus errors during PDMA send
	can: esd_usb: Remove CAN_CTRLMODE_3_SAMPLES for CAN-USB/3-FD
	wifi: rtw88: Fix USB/SDIO devices not transmitting beacons
	usbnet: fix cyclical race on disconnect with work queue
	arm64: dts: mediatek: mt8195-cherry: Mark USB 3.0 on xhci1 as disabled
	USB: appledisplay: close race between probe and completion handler
	USB: misc: cypress_cy7c63: check for short transfer
	USB: class: CDC-ACM: fix race between get_serial and set_serial
	usb: cdnsp: Fix incorrect usb_request status
	usb: dwc2: drd: fix clock gating on USB role switch
	bus: integrator-lm: fix OF node leak in probe()
	bus: mhi: host: pci_generic: Fix the name for the Telit FE990A
	firmware_loader: Block path traversal
	tty: rp2: Fix reset with non forgiving PCIe host bridges
	xhci: Set quirky xHC PCI hosts to D3 _after_ stopping and freeing them.
	serial: qcom-geni: fix fifo polling timeout
	crypto: ccp - Properly unregister /dev/sev on sev PLATFORM_STATUS failure
	drbd: Fix atomicity violation in drbd_uuid_set_bm()
	drbd: Add NULL check for net_conf to prevent dereference in state validation
	ACPI: sysfs: validate return type of _STR method
	ACPI: resource: Add another DMI match for the TongFang GMxXGxx
	efistub/tpm: Use ACPI reclaim memory for event log to avoid corruption
	perf/x86/intel/pt: Fix sampling synchronization
	wifi: mt76: mt7921: Check devm_kasprintf() returned value
	wifi: mt76: mt7915: check devm_kasprintf() returned value
	wifi: mt76: mt7996: fix NULL pointer dereference in mt7996_mcu_sta_bfer_he
	wifi: rtw88: 8821cu: Remove VID/PID 0bda:c82c
	wifi: rtw88: 8822c: Fix reported RX band width
	wifi: mt76: mt7615: check devm_kasprintf() returned value
	debugobjects: Fix conditions in fill_pool()
	btrfs: tree-checker: fix the wrong output of data backref objectid
	btrfs: always update fstrim_range on failure in FITRIM ioctl
	f2fs: fix several potential integer overflows in file offsets
	f2fs: prevent possible int overflow in dir_block_index()
	f2fs: avoid potential int overflow in sanity_check_area_boundary()
	f2fs: Require FMODE_WRITE for atomic write ioctls
	f2fs: fix to check atomic_file in f2fs ioctl interfaces
	hwrng: mtk - Use devm_pm_runtime_enable
	hwrng: bcm2835 - Add missing clk_disable_unprepare in bcm2835_rng_init
	hwrng: cctrng - Add missing clk_disable_unprepare in cctrng_resume
	arm64: esr: Define ESR_ELx_EC_* constants as UL
	arm64: errata: Enable the AC03_CPU_38 workaround for ampere1a
	arm64: dts: rockchip: Raise Pinebook Pro's panel backlight PWM frequency
	arm64: dts: qcom: sa8775p: Mark APPS and PCIe SMMUs as DMA coherent
	arm64: dts: rockchip: Correct the Pinebook Pro battery design capacity
	vfs: fix race between evice_inodes() and find_inode()&iput()
	fs: Fix file_set_fowner LSM hook inconsistencies
	nfs: fix memory leak in error path of nfs4_do_reclaim
	EDAC/igen6: Fix conversion of system address to physical memory address
	icmp: change the order of rate limits
	cpuidle: riscv-sbi: Use scoped device node handling to fix missing of_node_put
	padata: use integer wrap around to prevent deadlock on seq_nr overflow
	spi: fspi: involve lut_num for struct nxp_fspi_devtype_data
	ARM: dts: imx6ul-geam: fix fsl,pins property in tscgrp pinctrl
	soc: versatile: realview: fix memory leak during device remove
	soc: versatile: realview: fix soc_dev leak during device remove
	usb: yurex: Replace snprintf() with the safer scnprintf() variant
	USB: misc: yurex: fix race between read and write
	xhci: Add a quirk for writing ERST in high-low order
	usb: xhci: fix loss of data on Cadence xHC
	pps: remove usage of the deprecated ida_simple_xx() API
	pps: add an error check in parport_attach
	tty: serial: kgdboc: Fix 8250_* kgdb over serial
	serial: don't use uninitialized value in uart_poll_init()
	x86/idtentry: Incorporate definitions/declarations of the FRED entries
	x86/entry: Remove unwanted instrumentation in common_interrupt()
	lib/bitmap: add bitmap_{read,write}()
	btrfs: subpage: fix the bitmap dump which can cause bitmap corruption
	btrfs: reorder btrfs_inode to fill gaps
	btrfs: update comment for struct btrfs_inode::lock
	btrfs: fix race setting file private on concurrent lseek using same fd
	dt-bindings: spi: nxp-fspi: support i.MX93 and i.MX95
	dt-bindings: spi: nxp-fspi: add imx8ulp support
	thunderbolt: Fix debug log when DisplayPort adapter not available for pairing
	thunderbolt: Use tb_tunnel_dbg() where possible to make logging more consistent
	thunderbolt: Expose tb_tunnel_xxx() log macros to the rest of the driver
	thunderbolt: Create multiple DisplayPort tunnels if there are more DP IN/OUT pairs
	thunderbolt: Use constants for path weight and priority
	thunderbolt: Use weight constants in tb_usb3_consumed_bandwidth()
	thunderbolt: Make is_gen4_link() available to the rest of the driver
	thunderbolt: Change bandwidth reservations to comply USB4 v2
	thunderbolt: Introduce tb_port_path_direction_downstream()
	thunderbolt: Introduce tb_for_each_upstream_port_on_path()
	thunderbolt: Introduce tb_switch_depth()
	thunderbolt: Add support for asymmetric link
	thunderbolt: Configure asymmetric link if needed and bandwidth allows
	thunderbolt: Improve DisplayPort tunnel setup process to be more robust
	mm/filemap: return early if failed to allocate memory for split
	lib/xarray: introduce a new helper xas_get_order
	mm/filemap: optimize filemap folio adding
	bpf: lsm: Set bpf_lsm_blob_sizes.lbs_task to 0
	dm-verity: restart or panic on an I/O error
	lockdep: fix deadlock issue between lockdep and rcu
	mm: only enforce minimum stack gap size if it's sensible
	spi: fspi: add support for imx8ulp
	module: Fix KCOV-ignored file name
	mm/damon/vaddr: protect vma traversal in __damon_va_thre_regions() with rcu read lock
	i2c: aspeed: Update the stop sw state when the bus recovery occurs
	i2c: isch: Add missed 'else'
	Documentation: KVM: fix warning in "make htmldocs"
	bpf: Fix use-after-free in bpf_uprobe_multi_link_attach()
	usb: yurex: Fix inconsistent locking bug in yurex_read()
	perf/arm-cmn: Fail DTC counter allocation correctly
	iio: magnetometer: ak8975: Fix 'Unexpected device' error
	wifi: brcmfmac: add linefeed at end of file
	thunderbolt: Send uevent after asymmetric/symmetric switch
	thunderbolt: Fix minimum allocated USB 3.x and PCIe bandwidth
	thunderbolt: Fix NULL pointer dereference in tb_port_update_credits()
	x86/tdx: Fix "in-kernel MMIO" check
	spi: atmel-quadspi: Fix wrong register value written to MR
	Revert: "dm-verity: restart or panic on an I/O error"
	Linux 6.6.54

Commits that affect the gki_defconfig arm64 build:
  32ba316088 arm64: signal: Fix some under-bracketed UAPI macros [1 file, +3 | -3]
  c82ea72d96 mount: handle OOM on mnt_warn_timestamp_expiry [1 file, +11 | -3]
  ea8d90a5b0 Bluetooth: hci_core: Fix sending MGMT_EV_CONNECT_FAILED [3 files, +13 | -10]
  7eebbdde4b Bluetooth: hci_sync: Ignore errors from HCI_OP_REMOTE_NAME_REQ_CANCEL [1 file, +4 | -1]
  80bd490ac0 sock_map: Add a cond_resched() in sock_hash_free() [1 file, +1 | -0]
  c3d941cc73 can: bcm: Clear bo->bcm_proc_read after remove_proc_entry(). [1 file, +3 | -1]
  2b5e904dea net: tipc: avoid possible garbage value [1 file, +1 | -1]
  0ceb2f2b5c ipv6: avoid possible NULL deref in rt6_uncached_list_flush_dev() [1 file, +1 | -1]
  75a5e5909b ublk: move zone report data out of request pdu [1 file, +46 | -16]
  7faed2896d block, bfq: fix possible UAF for bfqq->bic with merge chain [1 file, +2 | -1]
  e50c9a3526 block, bfq: choose the last bfqq from merge chain in bfq_setup_cooperator() [1 file, +6 | -2]
  19f3bec2ac block, bfq: don't break merge chain in bfq_split_bfqq() [1 file, +1 | -1]
  0c9b52bfee erofs: fix incorrect symlink detection in fast symlink [1 file, +6 | -14]
  0780451f03 block, bfq: fix uaf for accessing waker_bfqq after splitting [1 file, +28 | -3]
  c3eba0a4e9 block, bfq: fix procress reference leakage for bfqq in merge chain [1 file, +17 | -20]
  7b3a35584d io_uring/io-wq: do not allow pinning outside of cpuset [1 file, +18 | -5]
  5740c0fa93 io_uring/io-wq: inherit cpuset of cgroup in io worker [1 file, +1 | -1]
  0d7ddfc892 block: print symbolic error name instead of error code [1 file, +2 | -2]
  80f5bfbb80 block: fix potential invalid pointer dereference in blk_add_partition [1 file, +5 | -3]
  1ccd886abf regulator: Return actual error in of_regulator_bulk_get_all() [1 file, +1 | -1]
  8e6f4aa43b pmdomain: core: Harden inter-column space in debug summary [1 file, +1 | -1]
  85572bf646 HID: wacom: Support sequence numbers smaller than 16-bit [1 file, +7 | -2]
  f7b4ba5f78 HID: wacom: Do not warn about dropped packets for first packet [2 files, +6 | -2]
  5a4f8de92d sched/fair: Make SCHED_IDLE entity be preempted in strict hierarchy [1 file, +9 | -13]
  2288b54b96 bpf: correctly handle malformed BPF_CORE_TYPE_ID_LOCAL relos [1 file, +8 | -0]
  cfd257f5e8 kthread: fix task state in kthread worker if being frozen [1 file, +9 | -1]
  e4006410b0 ext4: clear EXT4_GROUP_INFO_WAS_TRIMMED_BIT even mount with discard [1 file, +4 | -6]
  7a349feead ext4: avoid buffer_head leak in ext4_mark_inode_used() [1 file, +3 | -2]
  fae0793abd ext4: avoid potential buffer_head leak in __ext4_new_inode() [1 file, +4 | -3]
  9f70768554 ext4: avoid negative min_clusters in find_group_orlov() [1 file, +2 | -0]
  dd3f90e8c4 ext4: return error on ext4_find_inline_entry [1 file, +7 | -3]
  2a6579ef5f ext4: avoid OOB when system.data xattr changes underneath the filesystem [1 file, +21 | -10]
  faeff8b1ee ext4: check stripe size compatibility on remount as well [1 file, +22 | -7]
  ba4eb7f258 sched/numa: Document vma_numab_state fields [1 file, +21 | -0]
  707e9a6c88 sched/numa: Rename vma_numab_state::access_pids[] => ::pids_active[], ::next_pid_reset => ::pids_active_reset [3 files, +11 | -11]
  6654e54ae7 sched/numa: Trace decisions related to skipping VMAs [3 files, +71 | -4]
  7f01977665 sched/numa: Move up the access pid reset logic [1 file, +7 | -10]
  cb7846df6b sched/numa: Complete scanning of partial VMAs regardless of PID activity [3 files, +18 | -4]
  e3a2d3f6c4 sched/numa: Complete scanning of inactive VMAs when there is no alternative [4 files, +61 | -4]
  66f3fc7411 sched/numa: Fix the vma scan starving issue [1 file, +9 | -0]
  81197a9b45 bpf: Fix bpf_strtol and bpf_strtoul helpers for 32bit [1 file, +2 | -2]
  a2c8dc7e21 bpf: Fix helper writes to read-only maps [6 files, +24 | -45]
  abf7559b4f bpf: Improve check_raw_mode_ok test for MEM_UNINIT-tagged types [1 file, +11 | -5]
  a634fa8e48 bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error [3 files, +26 | -21]
  390de4d01b PCI: Wait for Link before restoring Downstream Buses [1 file, +6 | -2]
  18a672c62d nvdimm: Fix devs leaks in scan_labels() [1 file, +17 | -17]
  b6edb3fd96 pinctrl: single: fix missing error code in pcs_probe() [1 file, +2 | -1]
  95248d7497 PCI: kirin: Fix buffer overflow in kirin_pcie_parse_port() [1 file, +2 | -2]
  d889928bbc f2fs: atomic: fix to avoid racing w/ GC [2 files, +16 | -2]
  67c3c4638f f2fs: fix to avoid racing in between read and OPU dio write [1 file, +4 | -0]
  87f9d26fcc f2fs: Create COW inode from parent dentry for atomic write [1 file, +3 | -9]
  1bb0686a2e f2fs: fix to wait page writeback before setting gcing flag [1 file, +4 | -0]
  783b6ca342 f2fs: atomic: fix to truncate pagecache before on-disk metadata truncation [1 file, +4 | -0]
  4263b3ef81 f2fs: compress: do sanity check on cluster when CONFIG_F2FS_CHECK_FS is on [2 files, +35 | -30]
  b6f186bd6a f2fs: compress: don't redirty sparse cluster during {,de}compress [3 files, +61 | -26]
  66b1b8254d f2fs: clean up w/ dotdot_name [1 file, +2 | -3]
  649ec8b30d f2fs: fix to don't set SB_RDONLY in f2fs_handle_critical_error() [1 file, +6 | -4]
  54fd87259c lib/sbitmap: define swap_lock as raw_spinlock_t [2 files, +3 | -3]
  30b9bf4b41 nvme-multipath: system fails to create generic nvme device [1 file, +1 | -1]
  dd69fb026c driver core: Fix error handling in driver API device_rename() [1 file, +10 | -5]
  b8e45b9105 driver core: Fix a potential null-ptr-deref in module_add_driver() [1 file, +9 | -5]
  9360d077d3 Revert "dm: requeue IO if mapping table not yet available" [2 files, +11 | -4]
  af4b8a704f netfilter: nf_reject_ipv6: fix nf_reject_ip6_tcphdr_put() [1 file, +2 | -12]
  570f7d8c9b tcp: check skb is non-NULL in tcp_rto_delta_us() [1 file, +19 | -2]
  b3f7607f20 netfilter: ctnetlink: compile ctnetlink_label_size with CONFIG_NF_CONNTRACK_EVENTS [1 file, +2 | -5]
  358124ba2c io_uring/sqpoll: do not allow pinning outside of cpuset [1 file, +4 | -1]
  4bdf75c2ef io_uring: check for presence of task_work rather than TIF_NOTIFY_SIGNAL [1 file, +2 | -2]
  49d3a4ad57 mm: call the security_mmap_file() LSM hook in remap_file_pages() [1 file, +4 | -0]
  859f62a2f9 io_uring/sqpoll: retain test for whether the CPU is valid [1 file, +2 | -0]
  01ad0576f0 io_uring/sqpoll: do not put cpumask on stack [1 file, +10 | -3]
  fb17695735 PCI: Revert to the original speed after PCIe failed link retraining [1 file, +10 | -1]
  894f21117f PCI: Clear the LBMS bit after a link retrain [1 file, +9 | -1]
  a200897dc7 PCI: Correct error reporting with PCIe failed link retraining [1 file, +8 | -4]
  3d8573abdc PCI: Use an error code with PCIe failed link retraining [3 files, +14 | -14]
  a3765b497a KEYS: prevent NULL pointer dereference in find_asymmetric_key() [1 file, +4 | -3]
  ef83620438 fs: Create a generic is_dot_dotdot() utility [5 files, +14 | -32]
  568c7c4c77 scsi: sd: Fix off-by-one error in sd_read_block_characteristics() [1 file, +1 | -1]
  1e44ee6cdd usbnet: fix cyclical race on disconnect with work queue [2 files, +43 | -9]
  a0b4cbeb09 USB: class: CDC-ACM: fix race between get_serial and set_serial [1 file, +2 | -0]
  7420c1bf7f firmware_loader: Block path traversal [1 file, +30 | -0]
  e29a1f8b74 xhci: Set quirky xHC PCI hosts to D3 _after_ stopping and freeing them. [1 file, +5 | -3]
  633bd1d6be serial: qcom-geni: fix fifo polling timeout [1 file, +17 | -14]
  19fd2f2c5f efistub/tpm: Use ACPI reclaim memory for event log to avoid corruption [1 file, +1 | -1]
  b18a5c8382 f2fs: fix several potential integer overflows in file offsets [2 files, +3 | -3]
  0c598a0217 f2fs: prevent possible int overflow in dir_block_index() [1 file, +2 | -1]
  56d8651679 f2fs: avoid potential int overflow in sanity_check_area_boundary() [1 file, +2 | -2]
  5e0de753bf f2fs: Require FMODE_WRITE for atomic write ioctls [1 file, +9 | -0]
  7cb51731f2 f2fs: fix to check atomic_file in f2fs ioctl interfaces [1 file, +12 | -1]
  1b4089d567 hwrng: cctrng - Add missing clk_disable_unprepare in cctrng_resume [1 file, +1 | -0]
  93e1215f3f arm64: esr: Define ESR_ELx_EC_* constants as UL [1 file, +44 | -44]
  0eed942bc6 vfs: fix race between evice_inodes() and find_inode()&iput() [1 file, +4 | -0]
  4d3d0869ec fs: Fix file_set_fowner LSM hook inconsistencies [1 file, +4 | -10]
  662ec52260 icmp: change the order of rate limits [3 files, +76 | -57]
  eef5d6219a xhci: Add a quirk for writing ERST in high-low order [2 files, +6 | -1]
  88e26a196a tty: serial: kgdboc: Fix 8250_* kgdb over serial [1 file, +2 | -1]
  1d8c1add5e serial: don't use uninitialized value in uart_poll_init() [1 file, +6 | -7]
  459b724c3c lib/bitmap: add bitmap_{read,write}() [1 file, +77 | -0]
  ff3c557fa9 mm/filemap: return early if failed to allocate memory for split [1 file, +4 | -1]
  734594d41c lib/xarray: introduce a new helper xas_get_order [3 files, +71 | -18]
  b3c10ac84c bpf: lsm: Set bpf_lsm_blob_sizes.lbs_task to 0 [1 file, +0 | -1]
  bd24f30f50 dm-verity: restart or panic on an I/O error [1 file, +21 | -2]
  9347605691 mm: only enforce minimum stack gap size if it's sensible [1 file, +1 | -1]
  790c630ab0 bpf: Fix use-after-free in bpf_uprobe_multi_link_attach() [1 file, +6 | -3]
  cada2646b7 Revert: "dm-verity: restart or panic on an I/O error" [1 file, +2 | -21]
  commits=104  lines added=1286  lines removed=641

Change-Id: I4bc58755cc6f17017f20178d9a119864269ea188
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit d930352374
Greg Kroah-Hartman, 2024-10-08 08:04:39 +00:00
483 changed files with 5980 additions and 3079 deletions

.gitignore

@@ -135,7 +135,6 @@ GTAGS
 # id-utils files
 ID
-*.orig
 *~
 \#*#

@@ -3,7 +3,7 @@ KernelVersion:
 Contact: linux-iio@vger.kernel.org
 Description:
 Reading this returns the valid values that can be written to the
-on_altvoltage0_mode attribute:
+filter_mode attribute:
 - auto -> Adjust bandpass filter to track changes in input clock rate.
 - manual -> disable/unregister the clock rate notifier / input clock tracking.

@@ -54,6 +54,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Ampere         | AmpereOne       | AC03_CPU_38     | AMPERE_ERRATUM_AC03_CPU_38  |
 +----------------+-----------------+-----------------+-----------------------------+
+| Ampere         | AmpereOne AC04  | AC04_CPU_10     | AMPERE_ERRATUM_AC03_CPU_38  |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
 +----------------+-----------------+-----------------+-----------------------------+

@@ -23,7 +23,6 @@ properties:
 - ak8963
 - ak09911
 - ak09912
-- ak09916
 deprecated: true
 reg:

@@ -15,12 +15,19 @@ allOf:
 properties:
 compatible:
-enum:
-- nxp,imx8dxl-fspi
-- nxp,imx8mm-fspi
-- nxp,imx8mp-fspi
-- nxp,imx8qxp-fspi
-- nxp,lx2160a-fspi
+oneOf:
+- enum:
+- nxp,imx8dxl-fspi
+- nxp,imx8mm-fspi
+- nxp,imx8mp-fspi
+- nxp,imx8qxp-fspi
+- nxp,imx8ulp-fspi
+- nxp,lx2160a-fspi
+- items:
+- enum:
+- nxp,imx93-fspi
+- nxp,imx95-fspi
+- const: nxp,imx8mm-fspi
 reg:
 items:

@@ -540,7 +540,7 @@ at module load time (for a module) with::
 alerts_broken
 The addresses are normal I2C addresses. The adapter is the string
-name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
+name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
 It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
 spaces, so if the name is "This is an I2C chip" you can say
 adapter_name=ThisisanI2cchip. This is because it's hard to pass in

@@ -9,7 +9,7 @@ KVM Lock Overview
 The acquisition orders for mutexes are as follows:
-- cpus_read_lock() is taken outside kvm_lock
+- cpus_read_lock() is taken outside kvm_lock and kvm_usage_lock
 - kvm->lock is taken outside vcpu->mutex
@@ -24,6 +24,13 @@ The acquisition orders for mutexes are as follows:
 are taken on the waiting side when modifying memslots, so MMU notifiers
 must not take either kvm->slots_lock or kvm->slots_arch_lock.
+cpus_read_lock() vs kvm_lock:
+- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that
+being the official ordering, as it is quite easy to unknowingly trigger
+cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list,
+e.g. avoid complex operations when possible.
 For SRCU:
 - ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
@@ -228,10 +235,17 @@ time it will be set using the Dirty tracking mechanism described above.
 :Type: mutex
 :Arch: any
 :Protects: - vm_list
-- kvm_usage_count
+``kvm_usage_lock``
+^^^^^^^^^^^^^^^^^^
+:Type: mutex
+:Arch: any
+:Protects: - kvm_usage_count
 - hardware virtualization enable/disable
-:Comment: KVM also disables CPU hotplug via cpus_read_lock() during
-enable/disable.
+:Comment: Exists because using kvm_lock leads to deadlock (see earlier comment
+on cpus_read_lock() vs kvm_lock). Note, KVM also disables CPU hotplug via
+cpus_read_lock() when enabling/disabling virtualization.
 ``kvm->mn_invalidate_lock``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -291,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above.
 wakeup.
 ``vendor_module_lock``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
 :Type: mutex
 :Arch: x86
 :Protects: loading a vendor module (kvm_amd or kvm_intel)
-:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is
-taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and
-many operations need to take cpu_hotplug_lock when loading a vendor module,
-e.g. updating static calls.
+:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken
+in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while
+cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many
+operations need to take cpu_hotplug_lock when loading a vendor module, e.g.
+updating static calls.

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 53
+SUBLEVEL = 54
 EXTRAVERSION =
 NAME = Hurr durr I'ma ninja sloth

@@ -1312,7 +1312,7 @@
 compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
 reg = <0xfffffe20 0x20>;
 interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
 };
 pit: timer@fffffe40 {
@@ -1338,7 +1338,7 @@
 compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
 reg = <0xfffffea8 0x100>;
 interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
 };
 watchdog: watchdog@ffffff80 {

@@ -272,7 +272,7 @@
 compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
 reg = <0xe001d020 0x30>;
 interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
-clocks = <&clk32k 0>;
+clocks = <&clk32k 1>;
 };
 clk32k: clock-controller@e001d050 {

@@ -366,7 +366,7 @@
 };
 pinctrl_tsc: tscgrp {
-fsl,pin = <
+fsl,pins = <
 MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
 MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0
 MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0

@@ -350,7 +350,7 @@
 &iomuxc_lpsr {
 pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp {
-fsl,phy = <
+fsl,pins = <
 MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
 >;
 };

@@ -359,7 +359,7 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
 u32 val = __raw_readl(psc->reg);
 u8 index = (val & psc->mask) >> psc->shift;
-if (index > psc->num_div)
+if (index >= psc->num_div)
 return 0;
 return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]);

@@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
 return;
 }
 map = syscon_node_to_regmap(np);
+of_node_put(np);
 if (IS_ERR(map)) {
 pr_err("PLATSMP: No syscon regmap\n");
 return;

@@ -64,33 +64,37 @@
 #ifdef CONFIG_AS_VFP_VMRS_FPINST
 #define fmrx(_vfp_) ({ \
 u32 __v; \
-asm(".fpu vfpv2\n" \
+asm volatile (".fpu vfpv2\n" \
 "vmrs %0, " #_vfp_ \
 : "=r" (__v) : : "cc"); \
 __v; \
 })
-#define fmxr(_vfp_,_var_) \
-asm(".fpu vfpv2\n" \
+#define fmxr(_vfp_, _var_) ({ \
+asm volatile (".fpu vfpv2\n" \
 "vmsr " #_vfp_ ", %0" \
-: : "r" (_var_) : "cc")
+: : "r" (_var_) : "cc"); \
+})
 #else
 #define vfpreg(_vfp_) #_vfp_
 #define fmrx(_vfp_) ({ \
 u32 __v; \
-asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \
-: "=r" (__v) : : "cc"); \
-__v; \
-})
+asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \
+"cr0, 0 @ fmrx %0, " #_vfp_ \
+: "=r" (__v) : : "cc"); \
+__v; \
+})
-#define fmxr(_vfp_,_var_) \
-asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \
-: : "r" (_var_) : "cc")
+#define fmxr(_vfp_, _var_) ({ \
+asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \
+"cr0, 0 @ fmxr " #_vfp_ ", %0" \
+: : "r" (_var_) : "cc"); \
+})
 #endif

@@ -424,7 +424,7 @@ config AMPERE_ERRATUM_AC03_CPU_38
 default y
 help
 This option adds an alternative code sequence to work around Ampere
-erratum AC03_CPU_38 on AmpereOne.
+errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne.
 The affected design reports FEAT_HAFDBS as not implemented in
 ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0

@@ -32,7 +32,7 @@
 device_type = "memory";
 reg = <0x0 0x80000000 0x3da00000>,
 <0x0 0xc0000000 0x40000000>,
-<0x8 0x80000000 0x40000000>;
+<0x8 0x80000000 0x80000000>;
 };
 gpio-keys {

@@ -731,7 +731,7 @@
 opp-900000000-3 {
 opp-hz = /bits/ 64 <900000000>;
 opp-microvolt = <850000>;
-opp-supported-hw = <0x8>;
+opp-supported-hw = <0xcf>;
 };
 opp-900000000-4 {
@@ -743,13 +743,13 @@
 opp-900000000-5 {
 opp-hz = /bits/ 64 <900000000>;
 opp-microvolt = <825000>;
-opp-supported-hw = <0x30>;
+opp-supported-hw = <0x20>;
 };
 opp-950000000-3 {
 opp-hz = /bits/ 64 <950000000>;
 opp-microvolt = <900000>;
-opp-supported-hw = <0x8>;
+opp-supported-hw = <0xcf>;
 };
 opp-950000000-4 {
@@ -761,13 +761,13 @@
 opp-950000000-5 {
 opp-hz = /bits/ 64 <950000000>;
 opp-microvolt = <850000>;
-opp-supported-hw = <0x30>;
+opp-supported-hw = <0x20>;
 };
 opp-1000000000-3 {
 opp-hz = /bits/ 64 <1000000000>;
 opp-microvolt = <950000>;
-opp-supported-hw = <0x8>;
+opp-supported-hw = <0xcf>;
 };
 opp-1000000000-4 {
@@ -779,7 +779,7 @@
 opp-1000000000-5 {
 opp-hz = /bits/ 64 <1000000000>;
 opp-microvolt = <875000>;
-opp-supported-hw = <0x30>;
+opp-supported-hw = <0x20>;
 };
 };

@@ -1312,6 +1312,7 @@
 usb2-lpm-disable;
 vusb33-supply = <&mt6359_vusb_ldo_reg>;
 vbus-supply = <&usb_vbus>;
+mediatek,u3p-dis-msk = <1>;
 };
 #include <arm/cros-ec-keyboard.dtsi>

@@ -2766,10 +2766,10 @@
 compatible = "mediatek,mt8195-dp-intf";
 reg = <0 0x1c015000 0 0x1000>;
 interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>;
-clocks = <&vdosys0 CLK_VDO0_DP_INTF0>,
-<&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
+clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
+<&vdosys0 CLK_VDO0_DP_INTF0>,
 <&apmixedsys CLK_APMIXED_TVDPLL1>;
-clock-names = "engine", "pixel", "pll";
+clock-names = "pixel", "engine", "pll";
 status = "disabled";
 };
@@ -3036,10 +3036,10 @@
 reg = <0 0x1c113000 0 0x1000>;
 interrupts = <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH 0>;
 power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
-clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>,
-<&vdosys1 CLK_VDO1_DPINTF>,
+clocks = <&vdosys1 CLK_VDO1_DPINTF>,
+<&vdosys1 CLK_VDO1_DP_INTF0_MM>,
 <&apmixedsys CLK_APMIXED_TVDPLL2>;
-clock-names = "engine", "pixel", "pll";
+clock-names = "pixel", "engine", "pll";
 status = "disabled";
 };

@@ -1951,6 +1951,7 @@
 reg = <0x0 0x15000000 0x0 0x100000>;
 #iommu-cells = <2>;
 #global-interrupts = <2>;
+dma-coherent;
 interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
@@ -2089,6 +2090,7 @@
 reg = <0x0 0x15200000 0x0 0x80000>;
 #iommu-cells = <2>;
 #global-interrupts = <2>;
+dma-coherent;
 interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>,
 <GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>,


@@ -145,8 +145,8 @@
 #interrupt-cells = <3>;
 #address-cells = <0>;
 interrupt-controller;
-reg = <0x0 0x11900000 0 0x40000>,
-<0x0 0x11940000 0 0x60000>;
+reg = <0x0 0x11900000 0 0x20000>,
+<0x0 0x11940000 0 0x40000>;
 interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
 };
 };


@@ -997,8 +997,8 @@
 #interrupt-cells = <3>;
 #address-cells = <0>;
 interrupt-controller;
-reg = <0x0 0x11900000 0 0x40000>,
-<0x0 0x11940000 0 0x60000>;
+reg = <0x0 0x11900000 0 0x20000>,
+<0x0 0x11940000 0 0x40000>;
 interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
 };


@@ -1004,8 +1004,8 @@
 #interrupt-cells = <3>;
 #address-cells = <0>;
 interrupt-controller;
-reg = <0x0 0x11900000 0 0x40000>,
-<0x0 0x11940000 0 0x60000>;
+reg = <0x0 0x11900000 0 0x20000>,
+<0x0 0x11940000 0 0x40000>;
 interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
 };


@@ -32,12 +32,12 @@
 backlight: edp-backlight {
 compatible = "pwm-backlight";
 power-supply = <&vcc_12v>;
-pwms = <&pwm0 0 740740 0>;
+pwms = <&pwm0 0 125000 0>;
 };
 bat: battery {
 compatible = "simple-battery";
-charge-full-design-microamp-hours = <9800000>;
+charge-full-design-microamp-hours = <10000000>;
 voltage-max-design-microvolt = <4350000>;
 voltage-min-design-microvolt = <3000000>;
 };


@@ -13,7 +13,7 @@
 / {
 model = "Hardkernel ODROID-M1";
-compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568";
+compatible = "hardkernel,odroid-m1", "rockchip,rk3568";
 aliases {
 ethernet0 = &gmac0;


@@ -123,7 +123,7 @@
 no-map;
 };
-c66_1_dma_memory_region: c66-dma-memory@a6000000 {
+c66_0_dma_memory_region: c66-dma-memory@a6000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa6000000 0x00 0x100000>;
 no-map;
@@ -135,7 +135,7 @@
 no-map;
 };
-c66_0_dma_memory_region: c66-dma-memory@a7000000 {
+c66_1_dma_memory_region: c66-dma-memory@a7000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa7000000 0x00 0x100000>;
 no-map;


@@ -119,7 +119,7 @@
 no-map;
 };
-c66_1_dma_memory_region: c66-dma-memory@a6000000 {
+c66_0_dma_memory_region: c66-dma-memory@a6000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa6000000 0x00 0x100000>;
 no-map;
@@ -131,7 +131,7 @@
 no-map;
 };
-c66_0_dma_memory_region: c66-dma-memory@a7000000 {
+c66_1_dma_memory_region: c66-dma-memory@a7000000 {
 compatible = "shared-dma-pool";
 reg = <0x00 0xa7000000 0x00 0x100000>;
 no-map;


@@ -143,6 +143,7 @@
 #define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
 #define AMPERE_CPU_PART_AMPERE1 0xAC3
+#define AMPERE_CPU_PART_AMPERE1A 0xAC4
 #define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
@@ -212,6 +213,7 @@
 #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
 #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A)
 #define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */


@@ -10,63 +10,63 @@
 #include <asm/memory.h>
 #include <asm/sysreg.h>
-#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_UNKNOWN UL(0x00)
-#define ESR_ELx_EC_WFx (0x01)
+#define ESR_ELx_EC_WFx UL(0x01)
 /* Unallocated EC: 0x02 */
-#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_32 UL(0x03)
-#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP15_64 UL(0x04)
-#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_MR UL(0x05)
-#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_CP14_LS UL(0x06)
-#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define ESR_ELx_EC_FP_ASIMD UL(0x07)
-#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
+#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
-#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
+#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
 /* Unallocated EC: 0x0A - 0x0B */
-#define ESR_ELx_EC_CP14_64 (0x0C)
+#define ESR_ELx_EC_CP14_64 UL(0x0C)
-#define ESR_ELx_EC_BTI (0x0D)
+#define ESR_ELx_EC_BTI UL(0x0D)
-#define ESR_ELx_EC_ILL (0x0E)
+#define ESR_ELx_EC_ILL UL(0x0E)
 /* Unallocated EC: 0x0F - 0x10 */
-#define ESR_ELx_EC_SVC32 (0x11)
+#define ESR_ELx_EC_SVC32 UL(0x11)
-#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
+#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */
-#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
+#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */
 /* Unallocated EC: 0x14 */
-#define ESR_ELx_EC_SVC64 (0x15)
+#define ESR_ELx_EC_SVC64 UL(0x15)
-#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
+#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */
-#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
+#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */
-#define ESR_ELx_EC_SYS64 (0x18)
+#define ESR_ELx_EC_SYS64 UL(0x18)
-#define ESR_ELx_EC_SVE (0x19)
+#define ESR_ELx_EC_SVE UL(0x19)
-#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */
 /* Unallocated EC: 0x1B */
-#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
+#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */
-#define ESR_ELx_EC_SME (0x1D)
+#define ESR_ELx_EC_SME UL(0x1D)
 /* Unallocated EC: 0x1E */
-#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
+#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */
-#define ESR_ELx_EC_IABT_LOW (0x20)
+#define ESR_ELx_EC_IABT_LOW UL(0x20)
-#define ESR_ELx_EC_IABT_CUR (0x21)
+#define ESR_ELx_EC_IABT_CUR UL(0x21)
-#define ESR_ELx_EC_PC_ALIGN (0x22)
+#define ESR_ELx_EC_PC_ALIGN UL(0x22)
 /* Unallocated EC: 0x23 */
-#define ESR_ELx_EC_DABT_LOW (0x24)
+#define ESR_ELx_EC_DABT_LOW UL(0x24)
-#define ESR_ELx_EC_DABT_CUR (0x25)
+#define ESR_ELx_EC_DABT_CUR UL(0x25)
-#define ESR_ELx_EC_SP_ALIGN (0x26)
+#define ESR_ELx_EC_SP_ALIGN UL(0x26)
-#define ESR_ELx_EC_MOPS (0x27)
+#define ESR_ELx_EC_MOPS UL(0x27)
-#define ESR_ELx_EC_FP_EXC32 (0x28)
+#define ESR_ELx_EC_FP_EXC32 UL(0x28)
 /* Unallocated EC: 0x29 - 0x2B */
-#define ESR_ELx_EC_FP_EXC64 (0x2C)
+#define ESR_ELx_EC_FP_EXC64 UL(0x2C)
 /* Unallocated EC: 0x2D - 0x2E */
-#define ESR_ELx_EC_SERROR (0x2F)
+#define ESR_ELx_EC_SERROR UL(0x2F)
-#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+#define ESR_ELx_EC_BREAKPT_LOW UL(0x30)
-#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+#define ESR_ELx_EC_BREAKPT_CUR UL(0x31)
-#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32)
-#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33)
-#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+#define ESR_ELx_EC_WATCHPT_LOW UL(0x34)
-#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+#define ESR_ELx_EC_WATCHPT_CUR UL(0x35)
 /* Unallocated EC: 0x36 - 0x37 */
-#define ESR_ELx_EC_BKPT32 (0x38)
+#define ESR_ELx_EC_BKPT32 UL(0x38)
 /* Unallocated EC: 0x39 */
-#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
+#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */
 /* Unallocated EC: 0x3B */
-#define ESR_ELx_EC_BRK64 (0x3C)
+#define ESR_ELx_EC_BRK64 UL(0x3C)
 /* Unallocated EC: 0x3D - 0x3F */
-#define ESR_ELx_EC_MAX (0x3F)
+#define ESR_ELx_EC_MAX UL(0x3F)
 #define ESR_ELx_EC_SHIFT (26)
 #define ESR_ELx_EC_WIDTH (6)


@@ -312,10 +312,10 @@ struct zt_context {
 ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
 / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
 #define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
-(SVE_SIG_ZREG_SIZE(vq) * n))
+(SVE_SIG_ZREG_SIZE(vq) * (n)))
 #define ZA_SIG_CONTEXT_SIZE(vq) \
 (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
@@ -326,7 +326,7 @@ struct zt_context {
 #define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
+#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
 #define ZT_SIG_CONTEXT_SIZE(n) \
 (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))


@@ -472,6 +472,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
 };
 #endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
+static const struct midr_range erratum_ac03_cpu_38_list[] = {
+MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
+{},
+};
+#endif
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 {
@@ -789,7 +797,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 {
 .desc = "AmpereOne erratum AC03_CPU_38",
 .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
-ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
 },
 #endif
 {


@@ -426,9 +426,9 @@ out:
 return;
 }
-static __always_inline void do_ffa_mem_xfer(const u64 func_id,
+static void __do_ffa_mem_xfer(const u64 func_id,
 struct arm_smccc_res *res,
 struct kvm_cpu_context *ctxt)
 {
 DECLARE_REG(u32, len, ctxt, 1);
 DECLARE_REG(u32, fraglen, ctxt, 2);
@@ -440,9 +440,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 u32 offset, nr_ranges;
 int ret = 0;
-BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
-func_id != FFA_FN64_MEM_LEND);
 if (addr_mbz || npages_mbz || fraglen > len ||
 fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
 ret = FFA_RET_INVALID_PARAMETERS;
@@ -461,6 +458,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 goto out_unlock;
 }
+if (len > ffa_desc_buf.len) {
+ret = FFA_RET_NO_MEMORY;
+goto out_unlock;
+}
 buf = hyp_buffers.tx;
 memcpy(buf, host_buffers.tx, fraglen);
@@ -512,6 +514,13 @@ err_unshare:
 goto out_unlock;
 }
+#define do_ffa_mem_xfer(fid, res, ctxt) \
+do { \
+BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \
+(fid) != FFA_FN64_MEM_LEND); \
+__do_ffa_mem_xfer((fid), (res), (ctxt)); \
+} while (0);
 static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 struct kvm_cpu_context *ctxt)
 {


@@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
 {
 /* regs will be equal to current_pt_regs() */
 struct kernel_clone_args args = {
-.flags = regs->d1 & ~CSIGNAL,
+.flags = (u32)(regs->d1) & ~CSIGNAL,
 .pidfd = (int __user *)regs->d3,
 .child_tid = (int __user *)regs->d4,
 .parent_tid = (int __user *)regs->d3,


@@ -96,6 +96,7 @@ config CRYPTO_AES_PPC_SPE
 config CRYPTO_AES_GCM_P10
 tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
+depends on BROKEN
 depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
 select CRYPTO_LIB_AES
 select CRYPTO_ALGAPI


@@ -39,6 +39,12 @@
 #define STDX_BE stringify_in_c(stdbrx)
 #endif
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
 #else /* 32-bit */
 /* operations for longs and pointers */


@@ -11,6 +11,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 #include <asm/asm-const.h>
+#include <asm/asm-compat.h>
 /*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
 __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
 else
-__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
 return t;
 }
@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
 __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
 else
-__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
 }
 #define ATOMIC64_OP(op, asm_op) \


@@ -6,6 +6,7 @@
 #include <asm/page.h>
 #include <asm/extable.h>
 #include <asm/kup.h>
+#include <asm/asm-compat.h>
 #ifdef __powerpc64__
 /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -92,12 +93,6 @@ __pu_failed: \
 : label)
 #endif
-#ifdef CONFIG_CC_IS_CLANG
-#define DS_FORM_CONSTRAINT "Z<>"
-#else
-#define DS_FORM_CONSTRAINT "YZ<>"
-#endif
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_KERNEL_PREFIXED
 #define __put_user_asm2_goto(x, ptr, label) \


@@ -41,12 +41,12 @@
 #include "head_32.h"
 .macro compare_to_kernel_boundary scratch, addr
-#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
+#if CONFIG_TASK_SIZE <= 0x80000000 && MODULES_VADDR >= 0x80000000
 /* By simply checking Address >= 0x80000000, we know if its a kernel address */
 not. \scratch, \addr
 #else
 rlwinm \scratch, \addr, 16, 0xfff8
-cmpli cr0, \scratch, PAGE_OFFSET@h
+cmpli cr0, \scratch, TASK_SIZE@h
 #endif
 .endm
@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
 mfspr r10, SPRN_SRR0
 mtspr SPRN_MD_EPN, r10
 rlwinm r11, r10, 16, 0xfff8
-cmpli cr1, r11, PAGE_OFFSET@h
+cmpli cr1, r11, TASK_SIZE@h
 mfspr r11, SPRN_M_TWB /* Get level 1 table */
 blt+ cr1, 3f


@@ -38,11 +38,7 @@
 .else
 addi r4, r5, VDSO_DATA_OFFSET
 .endif
-#ifdef __powerpc64__
 bl CFUNC(DOTSYM(\funct))
-#else
-bl \funct
-#endif
 PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
 #ifdef __powerpc64__
 PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)


@@ -149,11 +149,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 mmu_mapin_immr();
-mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true);
 if (debug_pagealloc_enabled_or_kfence()) {
 top = boundary;
 } else {
-mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true);
 mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
 }


@@ -10,6 +10,7 @@
 #define __KVM_VCPU_RISCV_PMU_H
 #include <linux/perf/riscv_pmu.h>
+#include <asm/kvm_vcpu_insn.h>
 #include <asm/sbi.h>
 #ifdef CONFIG_RISCV_PMU_SBI
@@ -57,11 +58,11 @@ struct kvm_pmu {
 #if defined(CONFIG_32BIT)
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
 #else
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
 #endif
 int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
@@ -92,8 +93,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
 struct kvm_pmu {
 };
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
+unsigned long *val, unsigned long new_val,
+unsigned long wr_mask)
+{
+if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
+*val = 0;
+return KVM_INSN_CONTINUE_NEXT_SEPC;
+} else {
+return KVM_INSN_ILLEGAL_TRAP;
+}
+}
 #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = 0, .count = 0, .func = NULL },
+{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
 static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
 static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)


@@ -62,7 +62,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 perf_callchain_store(entry, regs->epc);
 fp = user_backtrace(entry, fp, regs->ra);
-while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
+while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
 fp = user_backtrace(entry, fp, 0);
 }


@@ -91,8 +91,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
 run->riscv_sbi.args[3] = cp->a3;
 run->riscv_sbi.args[4] = cp->a4;
 run->riscv_sbi.args[5] = cp->a5;
-run->riscv_sbi.ret[0] = cp->a0;
-run->riscv_sbi.ret[1] = cp->a1;
+run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
+run->riscv_sbi.ret[1] = 0;
 }
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,


@@ -14,6 +14,7 @@
 #include <asm/insn.h>
 #include <asm/insn-eval.h>
 #include <asm/pgtable.h>
+#include <asm/traps.h>
 /* MMIO direction */
 #define EPT_READ 0
@@ -405,6 +406,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 return -EINVAL;
 }
+if (!fault_in_kernel_space(ve->gla)) {
+WARN_ONCE(1, "Access to userspace address is not supported");
+return -EINVAL;
+}
 /*
 * Reject EPT violation #VEs that split pages.
 *


@@ -1602,6 +1602,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
 * see comment in intel_pt_interrupt().
 */
 WRITE_ONCE(pt->handle_nmi, 0);
+barrier();
 pt_config_stop(event);
@@ -1653,11 +1654,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
 return 0;
 /*
-* Here, handle_nmi tells us if the tracing is on
+* There is no PT interrupt in this mode, so stop the trace and it will
+* remain stopped while the buffer is copied.
 */
-if (READ_ONCE(pt->handle_nmi))
-pt_config_stop(event);
+pt_config_stop(event);
 pt_read_offset(buf);
 pt_update_head(pt);
@@ -1669,11 +1669,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
 ret = perf_output_copy_aux(&pt->handle, handle, from, to);
 /*
-* If the tracing was on when we turned up, restart it.
-* Compiler barrier not needed as we couldn't have been
-* preempted by anything that touches pt->handle_nmi.
+* Here, handle_nmi tells us if the tracing was on.
+* If the tracing was on, restart it.
 */
-if (pt->handle_nmi)
+if (READ_ONCE(pt->handle_nmi))
 pt_config_start(event);
 return ret;


@@ -165,6 +165,14 @@ void acpi_generic_reduced_hw_init(void);
 void x86_default_set_root_pointer(u64 addr);
 u64 x86_default_get_root_pointer(void);
+#ifdef CONFIG_XEN_PV
+/* A Xen PV domain needs a special acpi_os_ioremap() handling. */
+extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys,
+acpi_size size);
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+#define acpi_os_ioremap acpi_os_ioremap
+#endif
 #else /* !CONFIG_ACPI */
 #define acpi_lapic 0


@@ -63,7 +63,11 @@ extern u64 arch_irq_stat(void);
 #define local_softirq_pending_ref pcpu_hot.softirq_pending
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-static inline void kvm_set_cpu_l1tf_flush_l1d(void)
+/*
+* This function is called from noinstr interrupt contexts
+* and must be inlined to not get instrumentation.
+*/
+static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
 {
 __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
 }
@@ -78,7 +82,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
 return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
 }
 #else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
-static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
+static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
 #endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
 #endif /* _ASM_X86_HARDIRQ_H */


@@ -13,15 +13,18 @@
 #include <asm/irq_stack.h>
+typedef void (*idtentry_t)(struct pt_regs *regs);
 /**
 * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
 * No error code pushed by hardware
 * @vector: Vector number (ignored for C)
 * @func: Function name of the entry point
 *
-* Declares three functions:
+* Declares four functions:
 * - The ASM entry point: asm_##func
 * - The XEN PV trap entry point: xen_##func (maybe unused)
+* - The C handler called from the FRED event dispatcher (maybe unused)
 * - The C handler called from the ASM entry point
 *
 * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says it
@@ -31,6 +34,7 @@
 #define DECLARE_IDTENTRY(vector, func) \
 asmlinkage void asm_##func(void); \
 asmlinkage void xen_asm_##func(void); \
+void fred_##func(struct pt_regs *regs); \
 __visible void func(struct pt_regs *regs)
 /**
@@ -137,6 +141,17 @@ static __always_inline void __##func(struct pt_regs *regs, \
 #define DEFINE_IDTENTRY_RAW(func) \
 __visible noinstr void func(struct pt_regs *regs)
+/**
+* DEFINE_FREDENTRY_RAW - Emit code for raw FRED entry points
+* @func: Function name of the entry point
+*
+* @func is called from the FRED event dispatcher with interrupts disabled.
+*
+* See @DEFINE_IDTENTRY_RAW for further details.
+*/
+#define DEFINE_FREDENTRY_RAW(func) \
+noinstr void fred_##func(struct pt_regs *regs)
 /**
 * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
 * Error code pushed by hardware
@@ -197,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \
 irqentry_state_t state = irqentry_enter(regs); \
 u32 vector = (u32)(u8)error_code; \
 \
+kvm_set_cpu_l1tf_flush_l1d(); \
 instrumentation_begin(); \
-kvm_set_cpu_l1tf_flush_l1d(); \
 run_irq_on_irqstack_cond(__##func, regs, vector); \
 instrumentation_end(); \
 irqentry_exit(regs, state); \
@@ -233,17 +248,27 @@ static noinline void __##func(struct pt_regs *regs, u32 vector)
 #define DEFINE_IDTENTRY_SYSVEC(func) \
 static void __##func(struct pt_regs *regs); \
 \
+static __always_inline void instr_##func(struct pt_regs *regs) \
+{ \
+run_sysvec_on_irqstack_cond(__##func, regs); \
+} \
+\
 __visible noinstr void func(struct pt_regs *regs) \
 { \
 irqentry_state_t state = irqentry_enter(regs); \
 \
+kvm_set_cpu_l1tf_flush_l1d(); \
 instrumentation_begin(); \
-kvm_set_cpu_l1tf_flush_l1d(); \
-run_sysvec_on_irqstack_cond(__##func, regs); \
+instr_##func (regs); \
 instrumentation_end(); \
 irqentry_exit(regs, state); \
 } \
 \
+void fred_##func(struct pt_regs *regs) \
+{ \
+instr_##func (regs); \
+} \
+\
 static noinline void __##func(struct pt_regs *regs)
 /**
@@ -260,19 +285,29 @@ static noinline void __##func(struct pt_regs *regs)
 #define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \
 static __always_inline void __##func(struct pt_regs *regs); \
 \
+static __always_inline void instr_##func(struct pt_regs *regs) \
+{ \
+__irq_enter_raw(); \
+__##func (regs); \
+__irq_exit_raw(); \
+} \
+\
 __visible noinstr void func(struct pt_regs *regs) \
 { \
 irqentry_state_t state = irqentry_enter(regs); \
 \
+kvm_set_cpu_l1tf_flush_l1d(); \
 instrumentation_begin(); \
-__irq_enter_raw(); \
-kvm_set_cpu_l1tf_flush_l1d(); \
-__##func (regs); \
-__irq_exit_raw(); \
+instr_##func (regs); \
 instrumentation_end(); \
 irqentry_exit(regs, state); \
 } \
 \
+void fred_##func(struct pt_regs *regs) \
+{ \
+instr_##func (regs); \
+} \
+\
 static __always_inline void __##func(struct pt_regs *regs)
 /**
@@ -410,15 +445,18 @@ __visible noinstr void func(struct pt_regs *regs, \
 /* C-Code mapping */
 #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
 #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
+#define DEFINE_FREDENTRY_NMI DEFINE_FREDENTRY_RAW
 #ifdef CONFIG_X86_64
 #define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
+#define DEFINE_FREDENTRY_MCE DEFINE_FREDENTRY_RAW
 #define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
+#define DEFINE_FREDENTRY_DEBUG DEFINE_FREDENTRY_RAW
 #endif
 #else /* !__ASSEMBLY__ */
@@ -655,23 +693,36 @@ DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi);
 DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot);
 DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single);
 DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
+#else
+# define fred_sysvec_reschedule_ipi NULL
+# define fred_sysvec_reboot NULL
+# define fred_sysvec_call_function_single NULL
+# define fred_sysvec_call_function NULL
 #endif
 #ifdef CONFIG_X86_LOCAL_APIC
 # ifdef CONFIG_X86_MCE_THRESHOLD
 DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
+# else
+# define fred_sysvec_threshold NULL
 # endif
 # ifdef CONFIG_X86_MCE_AMD
 DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error);
+# else
+# define fred_sysvec_deferred_error NULL
 # endif
 # ifdef CONFIG_X86_THERMAL_VECTOR
 DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal);
+# else
+# define fred_sysvec_thermal NULL
 # endif
 # ifdef CONFIG_IRQ_WORK
 DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
+# else
+# define fred_sysvec_irq_work NULL
 # endif
 #endif
@@ -679,12 +730,16 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
 DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi);
 DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi);
 DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi);
+#else
+# define fred_sysvec_kvm_posted_intr_ipi NULL
+# define fred_sysvec_kvm_posted_intr_wakeup_ipi NULL
+# define fred_sysvec_kvm_posted_intr_nested_ipi NULL
 #endif
 #if IS_ENABLED(CONFIG_HYPERV)
 DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
 DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
 DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
 #endif
 #if IS_ENABLED(CONFIG_ACRN_GUEST)


@@ -1901,3 +1901,14 @@ u64 x86_default_get_root_pointer(void)
 {
 return boot_params.acpi_rsdp_addr;
 }
+#ifdef CONFIG_XEN_PV
+void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+return ioremap_cache(phys, size);
+}
+void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) =
+x86_acpi_os_ioremap;
+EXPORT_SYMBOL_GPL(acpi_os_ioremap);
+#endif


@@ -474,24 +474,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
 {
 struct sgx_epc_page *page;
 int nid_of_current = numa_node_id();
-int nid = nid_of_current;
+int nid_start, nid;
-if (node_isset(nid_of_current, sgx_numa_mask)) {
-page = __sgx_alloc_epc_page_from_node(nid_of_current);
-if (page)
-return page;
-}
-/* Fall back to the non-local NUMA nodes: */
-while (true) {
-nid = next_node_in(nid, sgx_numa_mask);
-if (nid == nid_of_current)
-break;
+/*
+* Try local node first. If it doesn't have an EPC section,
+* fall back to the non-local NUMA nodes.
+*/
+if (node_isset(nid_of_current, sgx_numa_mask))
+nid_start = nid_of_current;
+else
+nid_start = next_node_in(nid_of_current, sgx_numa_mask);
+nid = nid_start;
+do {
 page = __sgx_alloc_epc_page_from_node(nid);
 if (page)
 return page;
-}
+nid = next_node_in(nid, sgx_numa_mask);
+} while (nid != nid_start);
 return ERR_PTR(-ENOMEM);
 }


@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/reboot.h>
 #include <linux/serial_8250.h>
+#include <linux/acpi.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/acpi.h>


@@ -9,6 +9,7 @@
 #include <linux/pci.h>
 #include <linux/dmi.h>
 #include <linux/range.h>
+#include <linux/acpi.h>
 #include <asm/pci-direct.h>
 #include <linux/sort.h>


@@ -750,6 +750,27 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
 #define LAM_U57_BITS 6
+static void enable_lam_func(void *__mm)
+{
+struct mm_struct *mm = __mm;
+if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) {
+write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
+set_tlbstate_lam_mode(mm);
+}
+}
+static void mm_enable_lam(struct mm_struct *mm)
+{
+/*
+* Even though the process must still be single-threaded at this
+* point, kernel threads may be using the mm. IPI those kernel
+* threads if they exist.
+*/
+on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
+set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
+}
 static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
 {
 if (!cpu_feature_enabled(X86_FEATURE_LAM))
@@ -766,6 +787,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
 if (mmap_write_lock_killable(mm))
 return -EINTR;
+/*
+* MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from
+* being enabled unless the process is single threaded:
+*/
 if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) {
 mmap_write_unlock(mm);
 return -EBUSY;
@@ -782,9 +807,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
 return -EINVAL;
 }
-write_cr3(__read_cr3() | mm->context.lam_cr3_mask);
-set_tlbstate_lam_mode(mm);
-set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
+mm_enable_lam(mm);
 mmap_write_unlock(mm);


@@ -60,6 +60,7 @@
 #include <linux/stackprotector.h>
 #include <linux/cpuhotplug.h>
 #include <linux/mc146818rtc.h>
+#include <linux/acpi.h>
 #include <asm/acpi.h>
 #include <asm/cacheinfo.h>


@@ -8,6 +8,7 @@
 #include <linux/ioport.h>
 #include <linux/export.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
 #include <asm/acpi.h>
 #include <asm/bios_ebda.h>


@@ -2443,6 +2443,29 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
+int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
+{
+if (data & X2APIC_ICR_RESERVED_BITS)
+return 1;
+/*
+* The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but
+* only AMD requires it to be zero, Intel essentially just ignores the
+* bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled,
+* the CPU performs the reserved bits checks, i.e. the underlying CPU
+* behavior will "win". Arbitrarily clear the BUSY bit, as there is no
+* sane way to provide consistent behavior with respect to hardware.
+*/
+data &= ~APIC_ICR_BUSY;
+kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+kvm_lapic_set_reg64(apic, APIC_ICR, data);
+trace_kvm_apic_write(APIC_ICR, data);
+return 0;
+}
 /* emulate APIC access in a trap manner */
 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
 {
@@ -2460,7 +2483,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
 * maybe-unecessary write, and both are in the noise anyways.
 */
 if (apic_x2apic_mode(apic) && offset == APIC_ICR)
-kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
+WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)));
 else
 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
 }
@@ -3153,16 +3176,6 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 return 0;
 }
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
-{
-data &= ~APIC_ICR_BUSY;
-kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
-kvm_lapic_set_reg64(apic, APIC_ICR, data);
-trace_kvm_apic_write(APIC_ICR, data);
-return 0;
-}
 static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
 {
 u32 low;


@@ -497,9 +497,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 {
 struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
 u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-unsigned long new_lam = mm_lam_cr3_mask(next);
 bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
 unsigned cpu = smp_processor_id();
+unsigned long new_lam;
 u64 next_tlb_gen;
 bool need_flush;
 u16 new_asid;
@@ -622,9 +622,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
 }
-/*
-* Start remote flushes and then read tlb_gen.
-*/
+/* Start receiving IPIs and then read tlb_gen (and LAM below) */
 if (next != &init_mm)
 cpumask_set_cpu(cpu, mm_cpumask(next));
 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
@@ -636,6 +634,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 barrier();
 }
+new_lam = mm_lam_cr3_mask(next);
 set_tlbstate_lam_mode(next);
 if (need_flush) {
 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);


@@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev)
 return;
 rp = pcie_find_root_port(dev);
-if (!rp->pm_cap)
+if (!rp || !rp->pm_cap)
 return;
 rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
@@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev)
 u16 pmc;
 rp = pcie_find_root_port(dev);
-if (!rp->pm_cap)
+if (!rp || !rp->pm_cap)
 return;
 pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);


@@ -2019,10 +2019,7 @@ void __init xen_reserve_special_pages(void)
 void __init xen_pt_check_e820(void)
 {
-if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
-xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
-BUG();
-}
+xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
 }
 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;


@@ -70,6 +70,7 @@
 #include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/acpi.h>
 #include <asm/cache.h>
 #include <asm/setup.h>
@@ -80,6 +81,7 @@
 #include <asm/xen/hypervisor.h>
 #include <xen/balloon.h>
 #include <xen/grant_table.h>
+#include <xen/hvc-console.h>
 #include "multicalls.h"
 #include "xen-ops.h"
@@ -794,6 +796,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 return ret;
 }
+/* Remapped non-RAM areas */
+#define NR_NONRAM_REMAP 4
+static struct nonram_remap {
+phys_addr_t maddr;
+phys_addr_t paddr;
+size_t size;
+} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init;
+static unsigned int nr_nonram_remap __ro_after_init;
+/*
+* Do the real remapping of non-RAM regions as specified in the
+* xen_nonram_remap[] array.
+* In case of an error just crash the system.
+*/
+void __init xen_do_remap_nonram(void)
+{
+unsigned int i;
+unsigned int remapped = 0;
+const struct nonram_remap *remap = xen_nonram_remap;
+unsigned long pfn, mfn, end_pfn;
+for (i = 0; i < nr_nonram_remap; i++) {
+end_pfn = PFN_UP(remap->paddr + remap->size);
+pfn = PFN_DOWN(remap->paddr);
+mfn = PFN_DOWN(remap->maddr);
+while (pfn < end_pfn) {
+if (!set_phys_to_machine(pfn, mfn))
+panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
+pfn, mfn);
+pfn++;
+mfn++;
+remapped++;
+}
+remap++;
+}
+pr_info("Remapped %u non-RAM page(s)\n", remapped);
+}
+#ifdef CONFIG_ACPI
+/*
+* Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
+* regions into account.
+* Any attempt to map an area crossing a remap boundary will produce a
+* WARN() splat.
+* phys is related to remap->maddr on input and will be rebased to remap->paddr.
+*/
+static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
+acpi_size size)
+{
+unsigned int i;
+const struct nonram_remap *remap = xen_nonram_remap;
+for (i = 0; i < nr_nonram_remap; i++) {
+if (phys + size > remap->maddr &&
+phys < remap->maddr + remap->size) {
+WARN_ON(phys < remap->maddr ||
+phys + size > remap->maddr + remap->size);
+phys += remap->paddr - remap->maddr;
+break;
+}
+}
+return x86_acpi_os_ioremap(phys, size);
+}
+#endif /* CONFIG_ACPI */
+/*
+* Add a new non-RAM remap entry.
+* In case of no free entry found, just crash the system.
+*/
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+unsigned long size)
+{
+BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));
+if (nr_nonram_remap == NR_NONRAM_REMAP) {
+xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
+BUG();
+}
+#ifdef CONFIG_ACPI
+/* Switch to the Xen acpi_os_ioremap() variant. */
+if (nr_nonram_remap == 0)
+acpi_os_ioremap = xen_acpi_os_ioremap;
+#endif
+xen_nonram_remap[nr_nonram_remap].maddr = maddr;
+xen_nonram_remap[nr_nonram_remap].paddr = paddr;
+xen_nonram_remap[nr_nonram_remap].size = size;
+nr_nonram_remap++;
+}
 #ifdef CONFIG_XEN_DEBUG_FS
 #include <linux/debugfs.h>
 #include "debugfs.h"


@ -15,12 +15,12 @@
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/memory_hotplug.h> #include <linux/memory_hotplug.h>
#include <linux/acpi.h>
#include <asm/elf.h> #include <asm/elf.h>
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/e820/api.h> #include <asm/e820/api.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h> #include <asm/numa.h>
#include <asm/idtentry.h> #include <asm/idtentry.h>
#include <asm/xen/hypervisor.h> #include <asm/xen/hypervisor.h>
@ -47,6 +47,9 @@ bool xen_pv_pci_possible;
/* E820 map used during setting up memory. */ /* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata; static struct e820_table xen_e820_table __initdata;
/* Number of initially usable memory pages. */
static unsigned long ini_nr_pages __initdata;
/* /*
* Buffer used to remap identity mapped pages. We only need the virtual space. * Buffer used to remap identity mapped pages. We only need the virtual space.
* The physical page behind this address is remapped as needed to different * The physical page behind this address is remapped as needed to different
@ -213,7 +216,7 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails. * as a fallback if the remapping fails.
*/ */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages) unsigned long end_pfn)
{ {
unsigned long pfn, end; unsigned long pfn, end;
int ret; int ret;
@ -221,7 +224,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN_ON(start_pfn > end_pfn); WARN_ON(start_pfn > end_pfn);
/* Release pages first. */ /* Release pages first. */
end = min(end_pfn, nr_pages); end = min(end_pfn, ini_nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) { for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn); unsigned long mfn = pfn_to_mfn(pfn);
@ -342,15 +345,14 @@ static void __init xen_do_set_identity_and_remap_chunk(
* to Xen and not remapped. * to Xen and not remapped.
*/ */
static unsigned long __init xen_set_identity_and_remap_chunk( static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
unsigned long remap_pfn)
{ {
unsigned long pfn; unsigned long pfn;
unsigned long i = 0; unsigned long i = 0;
unsigned long n = end_pfn - start_pfn; unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0) if (remap_pfn == 0)
remap_pfn = nr_pages; remap_pfn = ini_nr_pages;
while (i < n) { while (i < n) {
unsigned long cur_pfn = start_pfn + i; unsigned long cur_pfn = start_pfn + i;
@ -359,19 +361,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long remap_range_size; unsigned long remap_range_size;
/* Do not remap pages beyond the current allocation */ /* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) { if (cur_pfn >= ini_nr_pages) {
/* Identity map remaining pages */ /* Identity map remaining pages */
set_phys_range_identity(cur_pfn, cur_pfn + size); set_phys_range_identity(cur_pfn, cur_pfn + size);
break; break;
} }
if (cur_pfn + size > nr_pages) if (cur_pfn + size > ini_nr_pages)
size = nr_pages - cur_pfn; size = ini_nr_pages - cur_pfn;
remap_range_size = xen_find_pfn_range(&remap_pfn); remap_range_size = xen_find_pfn_range(&remap_pfn);
if (!remap_range_size) { if (!remap_range_size) {
pr_warn("Unable to find available pfn range, not remapping identity pages\n"); pr_warn("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn, xen_set_identity_and_release_chunk(cur_pfn,
cur_pfn + left, nr_pages); cur_pfn + left);
break; break;
} }
/* Adjust size to fit in current e820 RAM region */ /* Adjust size to fit in current e820 RAM region */
@ -398,18 +400,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
} }
static unsigned long __init xen_count_remap_pages( static unsigned long __init xen_count_remap_pages(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long start_pfn, unsigned long end_pfn,
unsigned long remap_pages) unsigned long remap_pages)
{ {
if (start_pfn >= nr_pages) if (start_pfn >= ini_nr_pages)
return remap_pages; return remap_pages;
return remap_pages + min(end_pfn, nr_pages) - start_pfn; return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
} }
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, static unsigned long __init xen_foreach_remap_area(
unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
unsigned long nr_pages, unsigned long last_val)) unsigned long last_val))
{ {
phys_addr_t start = 0; phys_addr_t start = 0;
unsigned long ret_val = 0; unsigned long ret_val = 0;
@ -437,8 +439,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
end_pfn = PFN_UP(entry->addr); end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) if (start_pfn < end_pfn)
ret_val = func(start_pfn, end_pfn, nr_pages, ret_val = func(start_pfn, end_pfn, ret_val);
ret_val);
start = end; start = end;
} }
} }
@ -495,6 +496,8 @@ void __init xen_remap_memory(void)
set_pte_mfn(buf, mfn_save, PAGE_KERNEL); set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
pr_info("Remapped %ld page(s)\n", remapped); pr_info("Remapped %ld page(s)\n", remapped);
xen_do_remap_nonram();
} }
static unsigned long __init xen_get_pages_limit(void) static unsigned long __init xen_get_pages_limit(void)
@ -568,7 +571,7 @@ static void __init xen_ignore_unusable(void)
} }
} }
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{ {
struct e820_entry *entry; struct e820_entry *entry;
unsigned mapcnt; unsigned mapcnt;
@ -625,6 +628,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
return 0; return 0;
} }
/*
* Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
* Note that the E820 map is modified accordingly, but the P2M map isn't yet.
* The adaption of the P2M must be deferred until page allocation is possible.
*/
static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
{
struct e820_entry *entry;
unsigned int mapcnt;
phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
phys_addr_t swap_addr, swap_size, entry_end;
swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
entry = xen_e820_table.entries;
for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
entry_end = entry->addr + entry->size;
if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
entry_end - swap_size >= mem_end) {
/* Reduce RAM entry by needed space (whole pages). */
entry->size -= swap_size;
/* Add new entry at the end of E820 map. */
entry = xen_e820_table.entries +
xen_e820_table.nr_entries;
xen_e820_table.nr_entries++;
/* Fill new entry (keep size and page offset). */
entry->type = swap_entry->type;
entry->addr = entry_end - swap_size +
swap_addr - swap_entry->addr;
entry->size = swap_entry->size;
/* Convert old entry to RAM, align to pages. */
swap_entry->type = E820_TYPE_RAM;
swap_entry->addr = swap_addr;
swap_entry->size = swap_size;
/* Remember PFN<->MFN relation for P2M update. */
xen_add_remap_nonram(swap_addr, entry_end - swap_size,
swap_size);
/* Order E820 table and merge entries. */
e820__update_table(&xen_e820_table);
return;
}
entry++;
}
xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
BUG();
}
/*
* Look for non-RAM memory types in a specific guest physical area and move
* those away if possible (ACPI NVS only for now).
*/
static void __init xen_e820_resolve_conflicts(phys_addr_t start,
phys_addr_t size)
{
struct e820_entry *entry;
unsigned int mapcnt;
phys_addr_t end;
if (!size)
return;
end = start + size;
entry = xen_e820_table.entries;
for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
if (entry->addr >= end)
return;
if (entry->addr + entry->size > start &&
entry->type == E820_TYPE_NVS)
xen_e820_swap_entry_with_ram(entry);
entry++;
}
}
/*
* Check for an area in physical memory to be usable for non-movable purposes.
* An area is considered to usable if the used E820 map lists it to be RAM or
* some other type which can be moved to higher PFNs while keeping the MFNs.
* In case the area is not usable, crash the system with an error message.
*/
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
const char *component)
{
xen_e820_resolve_conflicts(start, size);
if (!xen_is_e820_reserved(start, size))
return;
xen_raw_console_write("Xen hypervisor allocated ");
xen_raw_console_write(component);
xen_raw_console_write(" memory conflicts with E820 map\n");
BUG();
}
/*
* Like memcpy, but with physical addresses for dest and src.
*/
@@ -684,20 +792,20 @@ static void __init xen_reserve_xen_mfnlist(void)
**/
char * __init xen_memory_setup(void)
{
unsigned long max_pfn, pfn_s, n_pfns;
unsigned long pfn_s, n_pfns;
phys_addr_t mem_end, addr, size, chunk_size;
u32 type;
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
unsigned long maxmem_pages;
int i;
int op;
xen_parse_512gb();
max_pfn = xen_get_pages_limit();
ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
max_pfn = min(max_pfn, xen_start_info->nr_pages);
mem_end = PFN_PHYS(ini_nr_pages);
mem_end = PFN_PHYS(max_pfn);
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
@@ -747,13 +855,35 @@ char * __init xen_memory_setup(void)
/* Make sure the Xen-supplied memory map is well-ordered. */
e820__update_table(&xen_e820_table);
/*
* Check whether the kernel itself conflicts with the target E820 map.
* Failing now is better than running into weird problems later due
* to relocating (and even reusing) pages with kernel text or data.
*/
xen_chk_is_e820_usable(__pa_symbol(_text),
__pa_symbol(_end) - __pa_symbol(_text),
"kernel");
/*
* Check for a conflict of the xen_start_info memory with the target
* E820 map.
*/
xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
"xen_start_info");
/*
* Check for a conflict of the hypervisor supplied page tables with
* the target E820 map.
*/
xen_pt_check_e820();
max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */
max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
max_pages += xen_foreach_remap_area(xen_count_remap_pages);
if (max_pages > max_pfn)
if (max_pages > ini_nr_pages)
extra_pages += max_pages - max_pfn;
extra_pages += max_pages - ini_nr_pages;
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
@@ -762,8 +892,8 @@ char * __init xen_memory_setup(void)
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*/
extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
extra_pages, max_pages - max_pfn);
extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
i = 0;
addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size;
@@ -819,23 +949,6 @@ char * __init xen_memory_setup(void)
e820__update_table(e820_table);
/*
* Check whether the kernel itself conflicts with the target E820 map.
* Failing now is better than running into weird problems later due
* to relocating (and even reusing) pages with kernel text or data.
*/
if (xen_is_e820_reserved(__pa_symbol(_text),
__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
BUG();
}
/*
* Check for a conflict of the hypervisor supplied page tables with
* the target E820 map.
*/
xen_pt_check_e820();
xen_reserve_xen_mfnlist();
/* Check for a conflict of the initrd with the target E820 map. */
@@ -863,7 +976,7 @@ char * __init xen_memory_setup(void)
* Set identity map on non-RAM pages and prepare remapping the
* underlying RAM.
*/
xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);


@@ -43,8 +43,12 @@ void xen_mm_unpin_all(void);
#ifdef CONFIG_X86_64
void __init xen_relocate_p2m(void);
#endif
void __init xen_do_remap_nonram(void);
void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
unsigned long size);
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
const char *component);
unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
void __init xen_inv_extra_mem(void);
void __init xen_remap_memory(void);


@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
/* if a merge has already been setup, then proceed with that first */
if (bfqq->new_bfqq)
new_bfqq = bfqq->new_bfqq;
return bfqq->new_bfqq;
if (new_bfqq) {
while (new_bfqq->new_bfqq)
new_bfqq = new_bfqq->new_bfqq;
return new_bfqq;
}
/*
* Check delayed stable merge for rotational or non-queueing
@@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_put_queue(bfqq);
}
static void
static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
struct bfq_io_cq *bic,
struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
struct bfq_queue *bfqq)
{
struct bfq_queue *new_bfqq = bfqq->new_bfqq;
bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
(unsigned long)new_bfqq->pid);
/* Save weight raising and idle window of the merged queues */
@@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
bfq_reassign_last_bfqq(bfqq, new_bfqq);
bfq_release_process_ref(bfqd, bfqq);
return new_bfqq;
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
* fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
while (bfqq != new_bfqq)
new_bfqq);
bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
/*
* If we get here, bio will be queued into new_queue,
* so use new_bfqq to decide whether bio and rq can be
* merged.
*/
bfqq = new_bfqq;
/*
* Change also bqfd->bio_bfqq, as
@@ -5699,9 +5701,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* state before killing it.
*/
bfqq->bic = bic;
bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
return bfq_merge_bfqqs(bfqd, bic, bfqq);
return new_bfqq;
}
/*
@@ -6156,6 +6156,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
bool waiting, idle_timer_disabled = false;
if (new_bfqq) {
struct bfq_queue *old_bfqq = bfqq;
/*
* Release the request's reference to the old bfqq
* and make sure one is taken to the shared queue.
@@ -6172,18 +6173,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
* new_bfqq.
*/
if (bic_to_bfqq(RQ_BIC(rq), true,
bfq_actuator_index(bfqd, rq->bio)) == bfqq)
bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
while (bfqq != new_bfqq)
bfqq, new_bfqq);
bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
}
bfq_clear_bfqq_just_created(bfqq);
bfq_clear_bfqq_just_created(old_bfqq);
/*
* rq is about to be enqueued into new_bfqq,
* release rq reference on bfqq
*/
bfq_put_queue(bfqq);
bfq_put_queue(old_bfqq);
rq->elv.priv[1] = new_bfqq;
bfqq = new_bfqq;
}
bfq_update_io_thinktime(bfqd, bfqq);
@@ -6721,7 +6722,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
if (bfqq_process_refs(bfqq) == 1) {
if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
bfqq->pid = current->pid;
bfq_clear_bfqq_coop(bfqq);
bfq_clear_bfqq_split_coop(bfqq);
@@ -6819,6 +6820,31 @@ static void bfq_prepare_request(struct request *rq)
rq->elv.priv[0] = rq->elv.priv[1] = NULL;
}
static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
{
struct bfq_queue *new_bfqq = bfqq->new_bfqq;
struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
if (!waker_bfqq)
return NULL;
while (new_bfqq) {
if (new_bfqq == waker_bfqq) {
/*
* If waker_bfqq is in the merge chain, and current
* is the only procress.
*/
if (bfqq_process_refs(waker_bfqq) == 1)
return NULL;
break;
}
new_bfqq = new_bfqq->new_bfqq;
}
return waker_bfqq;
}
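Several hunks in this series replace a single bfqq->new_bfqq dereference with a walk to the end of the merge chain (see bfq_setup_cooperator and bfq_waker_bfqq above). A self-contained sketch of that chain-walk pattern, with illustrative names rather than the real bfq types:

#include <stdio.h>

/* illustrative stand-in for a queue that may have been merged into another */
struct queue {
	const char *name;
	struct queue *new_queue;	/* analogous to bfqq->new_bfqq */
};

/* follow the merge chain until the final destination queue */
static struct queue *chain_tail(struct queue *q)
{
	while (q->new_queue)
		q = q->new_queue;
	return q;
}

int main(void)
{
	struct queue c = { "c", NULL };
	struct queue b = { "b", &c };
	struct queue a = { "a", &b };

	printf("a merges into %s\n", chain_tail(&a)->name);	/* prints "c" */
	return 0;
}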
/* /*
* If needed, init rq, allocate bfq data structures associated with * If needed, init rq, allocate bfq data structures associated with
* rq, and increment reference counters in the destination bfq_queue * rq, and increment reference counters in the destination bfq_queue
@@ -6880,7 +6906,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
/* If the queue was seeky for too long, break it apart. */
if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
!bic->bfqq_data[a_idx].stably_merged) {
struct bfq_queue *old_bfqq = bfqq;
struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);
/* Update bic before losing reference to bfqq */
if (bfq_bfqq_in_large_burst(bfqq))
@@ -6900,7 +6926,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
bfqq_already_existing = true;
if (!bfqq_already_existing) {
bfqq->waker_bfqq = old_bfqq->waker_bfqq;
bfqq->waker_bfqq = waker_bfqq;
bfqq->tentative_waker_bfqq = NULL;
/*
@@ -6910,7 +6936,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
* woken_list of the waker. See
* bfq_check_waker for details.
*/
if (bfqq->waker_bfqq)
if (waker_bfqq)
hlist_add_head(&bfqq->woken_list_node,
&bfqq->waker_bfqq->woken_list);
}
@@ -6932,7 +6958,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
* addition, if the queue has also just been split, we have to
* resume its state.
*/
if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
bfqq_process_refs(bfqq) == 1) {
bfqq->bic = bic;
if (split) {
/*


@@ -574,9 +574,11 @@ static bool blk_add_partition(struct gendisk *disk,
part = add_partition(disk, p, from, size, state->parts[p].flags,
&state->parts[p].info);
if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
if (IS_ERR(part)) {
printk(KERN_ERR " %s: p%d could not be added: %ld\n",
if (PTR_ERR(part) != -ENXIO) {
disk->disk_name, p, -PTR_ERR(part));
printk(KERN_ERR " %s: p%d could not be added: %pe\n",
disk->disk_name, p, part);
}
return true;
}


@@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
WARN_ON(!id_0 && !id_1 && !id_2);
if (id_0) {
lookup = id_0->data;
len = id_0->len;
} else if (id_1) {
lookup = id_1->data;
len = id_1->len;
} else {
} else if (id_2) {
lookup = id_2->data;
len = id_2->len;
} else {
WARN_ON(1);
return ERR_PTR(-EINVAL);
}
/* Construct an identifier "id:<keyid>". */


@@ -83,33 +83,30 @@ static void __init
do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
{
int speed;
int i, j;
unsigned long reps;
ktime_t min, start, diff;
ktime_t min, start, t0;
tmpl->next = template_list;
template_list = tmpl;
preempt_disable();
min = (ktime_t)S64_MAX;
reps = 0;
for (i = 0; i < 3; i++) {
t0 = ktime_get();
start = ktime_get();
/* delay start until time has advanced */
for (j = 0; j < REPS; j++) {
while ((start = ktime_get()) == t0)
mb(); /* prevent loop optimization */
cpu_relax();
tmpl->do_2(BENCH_SIZE, b1, b2);
do {
mb();
mb(); /* prevent loop optimization */
}
tmpl->do_2(BENCH_SIZE, b1, b2);
diff = ktime_sub(ktime_get(), start);
mb();
if (diff < min)
} while (reps++ < REPS || (t0 = ktime_get()) == start);
min = diff;
min = ktime_sub(t0, start);
}
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
if (!min)
speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
min = 1;
speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
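For context on the rewritten benchmark loop above: instead of timing a fixed three passes, it now spins until ktime_get() advances, then runs at least REPS iterations and keeps going until the clock advances again, so a too-coarse clock can no longer yield a zero elapsed time. Below is a minimal userspace sketch of that structure (not from the patch); clock_gettime(), the dummy workload() and the local REPS value are stand-ins for ktime_get(), tmpl->do_2() and the kernel constant.

/* Userspace analogue of the rewritten benchmark loop; the "tick" granularity
 * of CLOCK_MONOTONIC and the dummy workload are illustrative assumptions. */
#include <stdio.h>
#include <time.h>

#define REPS 800U

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static volatile unsigned long sink;
static void workload(void) { for (int i = 0; i < 1000; i++) sink += i; }

int main(void)
{
	unsigned long reps = 0;
	long long t0 = now_ns(), start, end = t0;

	while ((start = now_ns()) == t0)	/* wait until the clock advances */
		;
	do {
		workload();
	} while (reps++ < REPS || (end = now_ns()) == start);

	printf("%lu reps in %lld ns\n", reps, end - start);
	return 0;
}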


@@ -167,8 +167,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
GENMASK(((reg)->bit_width) - 1, 0))
#define MASK_VAL_WRITE(reg, prev_val, val) \
((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \
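A quick self-contained check of the two macros above, useful for convincing yourself that MASK_VAL_WRITE() preserves the bits outside the field; the register layout (bit_offset 8, bit_width 4), the local GENMASK() definition and the sample values are assumptions for illustration only:

/* Self-contained check of the mask macros on a made-up register layout;
 * GENMASK is redefined here for userspace (64-bit, unlike the kernel's). */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

struct reg { unsigned int bit_offset, bit_width; };

#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \
				 GENMASK(((reg)->bit_width) - 1, 0))
#define MASK_VAL_WRITE(reg, prev_val, val) \
	((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \
	((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))

int main(void)
{
	struct reg r = { .bit_offset = 8, .bit_width = 4 };
	uint64_t prev = 0xabcd;				/* current register contents */
	uint64_t field = MASK_VAL_READ(&r, prev);	/* 0xb: bits [11:8] of prev */
	uint64_t next = MASK_VAL_WRITE(&r, prev, 0x5);	/* 0xa5cd: only the field changes */

	printf("read=%#llx write=%#llx\n",
	       (unsigned long long)field, (unsigned long long)next);
	return 0;
}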
static ssize_t show_feedback_ctrs(struct kobject *kobj, static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf) struct kobj_attribute *attr, char *buf)
@@ -852,6 +855,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
@@ -1057,7 +1061,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
*val = MASK_VAL(reg, *val);
*val = MASK_VAL_READ(reg, *val);
return 0;
}
@ -1066,9 +1070,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{ {
int ret_val = 0; int ret_val = 0;
int size; int size;
u64 prev_val;
void __iomem *vaddr = NULL; void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg; struct cpc_reg *reg = &reg_res->cpc_entry.reg;
struct cpc_desc *cpc_desc;
size = GET_BIT_WIDTH(reg); size = GET_BIT_WIDTH(reg);
@@ -1101,8 +1107,34 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return acpi_os_write_memory((acpi_physical_address)reg->address,
val, size);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
val = MASK_VAL(reg, val);
cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_desc) {
pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
spin_lock(&cpc_desc->rmw_lock);
switch (size) {
case 8:
prev_val = readb_relaxed(vaddr);
break;
case 16:
prev_val = readw_relaxed(vaddr);
break;
case 32:
prev_val = readl_relaxed(vaddr);
break;
case 64:
prev_val = readq_relaxed(vaddr);
break;
default:
spin_unlock(&cpc_desc->rmw_lock);
return -EFAULT;
}
val = MASK_VAL_WRITE(reg, prev_val, val);
val |= prev_val;
}
switch (size) { switch (size) {
case 8: case 8:
@ -1129,6 +1161,9 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
break; break;
} }
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
spin_unlock(&cpc_desc->rmw_lock);
return ret_val; return ret_val;
} }


@@ -544,8 +544,9 @@ int acpi_device_setup_files(struct acpi_device *dev)
* If device has _STR, 'description' file is created
*/
if (acpi_has_method(dev->handle, "_STR")) {
status = acpi_evaluate_object(dev->handle, "_STR",
status = acpi_evaluate_object_typed(dev->handle, "_STR",
NULL, &buffer);
NULL, &buffer,
ACPI_TYPE_BUFFER);
if (ACPI_FAILURE(status))
buffer.pointer = NULL;
dev->pnp.str_obj = buffer.pointer;


@@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
struct tps68470_pmic_opregion *opregion;
acpi_status status;
if (!dev || !tps68470_regmap) {
if (!tps68470_regmap)
dev_warn(dev, "dev or regmap is NULL\n");
return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
return -EINVAL;
}
if (!handle) {
dev_warn(dev, "acpi handle is NULL\n");


@ -508,6 +508,12 @@ static const struct dmi_system_id maingear_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
}, },
}, },
{
/* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"),
},
},
{ {
/* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */ /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */
.matches = { .matches = {


@ -618,6 +618,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc; struct ata_queued_cmd *qc;
/*
* If the scmd was added to EH, via ata_qc_schedule_eh() ->
* scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
* have set DID_TIME_OUT (since libata does not have an abort
* handler). Thus, to clear DID_TIME_OUT, clear the host byte.
*/
set_host_byte(scmd, DID_OK);
ata_qc_for_each_raw(ap, qc, i) { ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE && if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd) qc->scsicmd == scmd)


@@ -1725,9 +1725,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
} else if (is_error && !have_sense) {
ata_gen_ata_sense(qc);
} else {
/* Keep the SCSI ML and status byte, clear host byte. */
cmd->result &= 0x0000ffff;
}
ata_qc_done(qc);
@@ -2393,7 +2390,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
case ALL_SUB_MPAGES:
n = ata_msense_control_spg0(dev, buf, changeable);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
n += ata_msense_control_ata_feature(dev, buf + n);
return n;
default:


@ -4485,9 +4485,11 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/ */
int device_rename(struct device *dev, const char *new_name) int device_rename(struct device *dev, const char *new_name)
{ {
struct subsys_private *sp = NULL;
struct kobject *kobj = &dev->kobj; struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL; char *old_device_name = NULL;
int error; int error;
bool is_link_renamed = false;
dev = get_device(dev); dev = get_device(dev);
if (!dev) if (!dev)
@ -4502,7 +4504,7 @@ int device_rename(struct device *dev, const char *new_name)
} }
if (dev->class) { if (dev->class) {
struct subsys_private *sp = class_to_subsys(dev->class); sp = class_to_subsys(dev->class);
if (!sp) { if (!sp) {
error = -EINVAL; error = -EINVAL;
@ -4511,16 +4513,19 @@ int device_rename(struct device *dev, const char *new_name)
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name,
new_name, kobject_namespace(kobj)); new_name, kobject_namespace(kobj));
subsys_put(sp);
if (error) if (error)
goto out; goto out;
is_link_renamed = true;
} }
error = kobject_rename(kobj, new_name); error = kobject_rename(kobj, new_name);
if (error)
goto out;
out: out:
if (error && is_link_renamed)
sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name,
old_device_name, kobject_namespace(kobj));
subsys_put(sp);
put_device(dev); put_device(dev);
kfree(old_device_name); kfree(old_device_name);


@@ -912,6 +912,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
{}
#endif
/*
* Reject firmware file names with ".." path components.
* There are drivers that construct firmware file names from device-supplied
* strings, and we don't want some device to be able to tell us "I would like to
* be sent my firmware from ../../../etc/shadow, please".
*
* Search for ".." surrounded by either '/' or start/end of string.
*
* This intentionally only looks at the firmware name, not at the firmware base
* directory or at symlink contents.
*/
static bool name_contains_dotdot(const char *name)
{
size_t name_len = strlen(name);
return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
strstr(name, "/../") != NULL ||
(name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
}
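A small userspace harness for the check above; the function body is copied from the patch, while the sample names and main() are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool name_contains_dotdot(const char *name)
{
	size_t name_len = strlen(name);

	return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
	       strstr(name, "/../") != NULL ||
	       (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
}

int main(void)
{
	/* "foo/bar..bin" is allowed, the path-traversal attempts are rejected */
	const char *names[] = { "foo/bar..bin", "foo/../bar.bin", "..", "a/.." };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-16s -> %s\n", names[i],
		       name_contains_dotdot(names[i]) ? "rejected" : "ok");
	return 0;
}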
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -932,6 +952,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
if (name_contains_dotdot(name)) {
dev_warn(device,
"Firmware load for '%s' refused, path contains '..' component\n",
name);
ret = -EINVAL;
goto out;
}
ret = _request_firmware_prepare(&fw, name, device, buf, size, ret = _request_firmware_prepare(&fw, name, device, buf, size,
offset, opt_flags); offset, opt_flags);
if (ret <= 0) /* error or already assigned */ if (ret <= 0) /* error or already assigned */
@ -1009,6 +1037,8 @@ out:
* @name will be used as $FIRMWARE in the uevent environment and * @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other * should be distinctive enough not to be confused with any other
* firmware image for this or any other device. * firmware image for this or any other device.
* It must not contain any ".." path components - "foo/bar..bin" is
* allowed, but "foo/../bar.bin" is not.
* *
* Caller must hold the reference count of @device. * Caller must hold the reference count of @device.
* *


@@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, struct device_driver *drv)
driver_name = make_driver_name(drv);
if (!driver_name) {
ret = -ENOMEM;
goto out;
goto out_remove_kobj;
}
module_create_drivers_dir(mk);
if (!mk->drivers_dir) {
ret = -EINVAL;
goto out;
goto out_free_driver_name;
}
ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
if (ret)
goto out;
goto out_remove_drivers_dir;
kfree(driver_name);
return 0;
out:
sysfs_remove_link(&drv->p->kobj, "module");
out_remove_drivers_dir:
sysfs_remove_link(mk->drivers_dir, driver_name);
out_free_driver_name:
kfree(driver_name);
out_remove_kobj:
sysfs_remove_link(&drv->p->kobj, "module");
return ret;
}
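The relabelled error path above follows the usual goto-unwind idiom: each label undoes only the steps that had already succeeded, in reverse order. A generic, self-contained sketch of that idiom (the resource names and the failure flag are made up; this is not the driver-core code):

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_late)
{
	char *a = NULL, *b = NULL;
	int ret;

	a = malloc(16);
	if (!a) {
		ret = -1;
		goto out;
	}

	b = malloc(16);
	if (!b) {
		ret = -1;
		goto out_free_a;
	}

	if (fail_late) {		/* pretend a later registration step failed */
		ret = -1;
		goto out_free_b;
	}

	printf("setup succeeded\n");
	free(b);			/* demo only: release everything again */
	free(a);
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
out:
	return ret;
}

int main(void) { return setup(0) ? 1 : 0; }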


@@ -3145,7 +3145,7 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
seq_printf(s, "%-30s %-49s %u", genpd->name, state, genpd->performance_state);
/* /*
* Modifications on the list require holding locks on both * Modifications on the list require holding locks on both


@ -3392,10 +3392,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{ {
unsigned long flags; unsigned long flags;
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) {
spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
return;
}
if (val == 0) { if (val == 0) {
drbd_uuid_move_history(device); drbd_uuid_move_history(device);
device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];


@@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
ns.disk == D_OUTDATED)
rv = SS_CONNECTED_OUTDATES;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
(nc->verify_alg[0] == 0))
rv = SS_NO_VERIFY_ALG;


@ -181,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{ {
struct request *req = blk_mq_rq_from_pdu(cmd); struct request *req = blk_mq_rq_from_pdu(cmd);
lockdep_assert_held(&cmd->lock);
/*
* Clear INFLIGHT flag so that this cmd won't be completed in
* normal completion path
*
* INFLIGHT flag will be set when the cmd is queued to nbd next
* time.
*/
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
blk_mq_requeue_request(req, true); blk_mq_requeue_request(req, true);
} }
@ -461,8 +472,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
nbd_mark_nsock_dead(nbd, nsock, 1); nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock); mutex_unlock(&nsock->tx_lock);
} }
mutex_unlock(&cmd->lock);
nbd_requeue_cmd(cmd); nbd_requeue_cmd(cmd);
mutex_unlock(&cmd->lock);
nbd_config_put(nbd); nbd_config_put(nbd);
return BLK_EH_DONE; return BLK_EH_DONE;
} }


@ -68,9 +68,6 @@ struct ublk_rq_data {
struct llist_node node; struct llist_node node;
struct kref ref; struct kref ref;
__u64 sector;
__u32 operation;
__u32 nr_zones;
}; };
struct ublk_uring_cmd_pdu { struct ublk_uring_cmd_pdu {
@ -215,6 +212,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
#ifdef CONFIG_BLK_DEV_ZONED #ifdef CONFIG_BLK_DEV_ZONED
struct ublk_zoned_report_desc {
__u64 sector;
__u32 operation;
__u32 nr_zones;
};
static DEFINE_XARRAY(ublk_zoned_report_descs);
static int ublk_zoned_insert_report_desc(const struct request *req,
struct ublk_zoned_report_desc *desc)
{
return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
desc, GFP_KERNEL);
}
static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
const struct request *req)
{
return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
}
static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
const struct request *req)
{
return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
}
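The fix above moves the report-zones parameters out of the per-request pdu and into a side table keyed by the request pointer (an xarray in the kernel). A userspace stand-in for that idea, using a deliberately simple fixed-size map instead of an xarray; the struct and function names here are illustrative, not the ublk API:

#include <stdint.h>
#include <stdio.h>

struct request { int tag; };
struct zone_desc { uint64_t sector; uint32_t nr_zones; };

/* metadata lives beside the request, keyed by its pointer */
#define MAP_SLOTS 16
static struct { const struct request *key; struct zone_desc val; } map[MAP_SLOTS];

static int desc_insert(const struct request *req, struct zone_desc d)
{
	for (int i = 0; i < MAP_SLOTS; i++)
		if (!map[i].key) { map[i].key = req; map[i].val = d; return 0; }
	return -1;
}

static struct zone_desc *desc_get(const struct request *req)
{
	for (int i = 0; i < MAP_SLOTS; i++)
		if (map[i].key == req) return &map[i].val;
	return NULL;
}

static void desc_erase(const struct request *req)
{
	for (int i = 0; i < MAP_SLOTS; i++)
		if (map[i].key == req) map[i].key = NULL;
}

int main(void)
{
	struct request req = { .tag = 3 };

	desc_insert(&req, (struct zone_desc){ .sector = 2048, .nr_zones = 4 });
	printf("sector=%llu\n", (unsigned long long)desc_get(&req)->sector);
	desc_erase(&req);
	return 0;
}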
static int ublk_get_nr_zones(const struct ublk_device *ub) static int ublk_get_nr_zones(const struct ublk_device *ub)
{ {
const struct ublk_param_basic *p = &ub->params.basic; const struct ublk_param_basic *p = &ub->params.basic;
@ -321,7 +345,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
unsigned int zones_in_request = unsigned int zones_in_request =
min_t(unsigned int, remaining_zones, max_zones_per_request); min_t(unsigned int, remaining_zones, max_zones_per_request);
struct request *req; struct request *req;
struct ublk_rq_data *pdu; struct ublk_zoned_report_desc desc;
blk_status_t status; blk_status_t status;
memset(buffer, 0, buffer_length); memset(buffer, 0, buffer_length);
@ -332,20 +356,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
goto out; goto out;
} }
pdu = blk_mq_rq_to_pdu(req); desc.operation = UBLK_IO_OP_REPORT_ZONES;
pdu->operation = UBLK_IO_OP_REPORT_ZONES; desc.sector = sector;
pdu->sector = sector; desc.nr_zones = zones_in_request;
pdu->nr_zones = zones_in_request; ret = ublk_zoned_insert_report_desc(req, &desc);
if (ret)
goto free_req;
ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
GFP_KERNEL); GFP_KERNEL);
if (ret) { if (ret)
blk_mq_free_request(req); goto erase_desc;
goto out;
}
status = blk_execute_rq(req, 0); status = blk_execute_rq(req, 0);
ret = blk_status_to_errno(status); ret = blk_status_to_errno(status);
erase_desc:
ublk_zoned_erase_report_desc(req);
free_req:
blk_mq_free_request(req); blk_mq_free_request(req);
if (ret) if (ret)
goto out; goto out;
@ -379,7 +406,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
{ {
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag]; struct ublk_io *io = &ubq->ios[req->tag];
struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req); struct ublk_zoned_report_desc *desc;
u32 ublk_op; u32 ublk_op;
switch (req_op(req)) { switch (req_op(req)) {
@ -402,12 +429,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
ublk_op = UBLK_IO_OP_ZONE_RESET_ALL; ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
break; break;
case REQ_OP_DRV_IN: case REQ_OP_DRV_IN:
ublk_op = pdu->operation; desc = ublk_zoned_get_report_desc(req);
if (!desc)
return BLK_STS_IOERR;
ublk_op = desc->operation;
switch (ublk_op) { switch (ublk_op) {
case UBLK_IO_OP_REPORT_ZONES: case UBLK_IO_OP_REPORT_ZONES:
iod->op_flags = ublk_op | ublk_req_build_flags(req); iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_zones = pdu->nr_zones; iod->nr_zones = desc->nr_zones;
iod->start_sector = pdu->sector; iod->start_sector = desc->sector;
return BLK_STS_OK; return BLK_STS_OK;
default: default:
return BLK_STS_IOERR; return BLK_STS_IOERR;


@ -1352,7 +1352,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb) if (!urb)
return -ENOMEM; return -ENOMEM;
size = le16_to_cpu(data->intr_ep->wMaxPacketSize); /* Use maximum HCI Event size so the USB stack handles
* ZPL/short-transfer automatically.
*/
size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags); buf = kmalloc(size, mem_flags);
if (!buf) { if (!buf) {


@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
return -ENODEV; return -ENODEV;
} }
map = syscon_node_to_regmap(syscon); map = syscon_node_to_regmap(syscon);
of_node_put(syscon);
if (IS_ERR(map)) { if (IS_ERR(map)) {
dev_err(dev, dev_err(dev,
"could not find Integrator/AP system controller\n"); "could not find Integrator/AP system controller\n");


@ -578,6 +578,15 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
.mru_default = 32768, .mru_default = 32768,
}; };
static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
.name = "telit-fe990a",
.config = &modem_telit_fn990_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
.sideband_wake = false,
.mru_default = 32768,
};
/* Keep the list sorted based on the PID. New VID should be added as the last entry */ /* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = { static const struct pci_device_id mhi_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
@ -595,9 +604,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FN990 */ /* Telit FN990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
/* Telit FE990 */ /* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */


@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng)
return ret; return ret;
ret = reset_control_reset(priv->reset); ret = reset_control_reset(priv->reset);
if (ret) if (ret) {
clk_disable_unprepare(priv->clk);
return ret; return ret;
}
if (priv->mask_interrupts) { if (priv->mask_interrupts) {
/* mask the interrupt */ /* mask the interrupt */


@ -624,6 +624,7 @@ static int __maybe_unused cctrng_resume(struct device *dev)
/* wait for Cryptocell reset completion */ /* wait for Cryptocell reset completion */
if (!cctrng_wait_for_reset_completion(drvdata)) { if (!cctrng_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed"); dev_err(dev, "Cryptocell reset not completed");
clk_disable_unprepare(drvdata->clk);
return -EBUSY; return -EBUSY;
} }


@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv); dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev); devm_pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "registered RNG driver\n"); dev_info(&pdev->dev, "registered RNG driver\n");


@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (!ret) if (!ret)
ret = tpm2_commit_space(chip, space, buf, &len); ret = tpm2_commit_space(chip, space, buf, &len);
else
tpm2_flush_space(chip);
out_rc: out_rc:
return ret ? ret : len; return ret ? ret : len;


@ -166,6 +166,9 @@ void tpm2_flush_space(struct tpm_chip *chip)
struct tpm_space *space = &chip->work_space; struct tpm_space *space = &chip->work_space;
int i; int i;
if (!space)
return;
for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++)
if (space->context_tbl[i] && ~space->context_tbl[i]) if (space->context_tbl[i] && ~space->context_tbl[i])
tpm2_flush_context(chip, space->context_tbl[i]); tpm2_flush_context(chip, space->context_tbl[i]);


@ -66,6 +66,7 @@ enum pll_component_id {
PLL_COMPID_FRAC, PLL_COMPID_FRAC,
PLL_COMPID_DIV0, PLL_COMPID_DIV0,
PLL_COMPID_DIV1, PLL_COMPID_DIV1,
PLL_COMPID_MAX,
}; };
/* /*
@ -165,7 +166,7 @@ static struct sama7g5_pll {
u8 t; u8 t;
u8 eid; u8 eid;
u8 safe_div; u8 safe_div;
} sama7g5_plls[][PLL_ID_MAX] = { } sama7g5_plls[][PLL_COMPID_MAX] = {
[PLL_ID_CPU] = { [PLL_ID_CPU] = {
[PLL_COMPID_FRAC] = { [PLL_COMPID_FRAC] = {
.n = "cpupll_fracck", .n = "cpupll_fracck",
@ -1038,7 +1039,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_pmc->chws[PMC_MAIN] = hw; sama7g5_pmc->chws[PMC_MAIN] = hw;
for (i = 0; i < PLL_ID_MAX; i++) { for (i = 0; i < PLL_ID_MAX; i++) {
for (j = 0; j < 3; j++) { for (j = 0; j < PLL_COMPID_MAX; j++) {
struct clk_hw *parent_hw; struct clk_hw *parent_hw;
if (!sama7g5_plls[i][j].n) if (!sama7g5_plls[i][j].n)


@ -14,6 +14,7 @@
#include "../clk-fractional-divider.h" #include "../clk-fractional-divider.h"
#include "clk.h" #include "clk.h"
#define PCG_PR_MASK BIT(31)
#define PCG_PCS_SHIFT 24 #define PCG_PCS_SHIFT 24
#define PCG_PCS_MASK 0x7 #define PCG_PCS_MASK 0x7
#define PCG_CGC_SHIFT 30 #define PCG_CGC_SHIFT 30
@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
struct clk_hw *hw; struct clk_hw *hw;
u32 val; u32 val;
val = readl(reg);
if (!(val & PCG_PR_MASK)) {
pr_info("PCC PR is 0 for clk:%s, bypass\n", name);
return 0;
}
if (mux_present) { if (mux_present) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL); mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux) if (!mux)


@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
.determine_rate = imx8m_clk_composite_mux_determine_rate, .determine_rate = imx8m_clk_composite_mux_determine_rate,
}; };
static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
unsigned long flags;
u32 val;
spin_lock_irqsave(gate->lock, flags);
val = readl(gate->reg);
val |= BIT(gate->bit_idx);
writel(val, gate->reg);
spin_unlock_irqrestore(gate->lock, flags);
return 0;
}
static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
{
/* composite clk requires the disable hook */
}
static const struct clk_ops imx8m_clk_composite_gate_ops = {
.enable = imx8m_clk_composite_gate_enable,
.disable = imx8m_clk_composite_gate_disable,
.is_enabled = clk_gate_is_enabled,
};
struct clk_hw *__imx8m_clk_hw_composite(const char *name, struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names, const char * const *parent_names,
int num_parents, void __iomem *reg, int num_parents, void __iomem *reg,
@ -217,10 +245,11 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
struct clk_mux *mux = NULL; struct clk_mux *mux = NULL;
const struct clk_ops *divider_ops; const struct clk_ops *divider_ops;
const struct clk_ops *mux_ops; const struct clk_ops *mux_ops;
const struct clk_ops *gate_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL); mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux) if (!mux)
goto fail; return ERR_CAST(hw);
mux_hw = &mux->hw; mux_hw = &mux->hw;
mux->reg = reg; mux->reg = reg;
@ -230,7 +259,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div = kzalloc(sizeof(*div), GFP_KERNEL); div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div) if (!div)
goto fail; goto free_mux;
div_hw = &div->hw; div_hw = &div->hw;
div->reg = reg; div->reg = reg;
@ -257,28 +286,32 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div->flags = CLK_DIVIDER_ROUND_CLOSEST; div->flags = CLK_DIVIDER_ROUND_CLOSEST;
/* skip registering the gate ops if M4 is enabled */ /* skip registering the gate ops if M4 is enabled */
if (!mcore_booted) { gate = kzalloc(sizeof(*gate), GFP_KERNEL);
gate = kzalloc(sizeof(*gate), GFP_KERNEL); if (!gate)
if (!gate) goto free_div;
goto fail;
gate_hw = &gate->hw; gate_hw = &gate->hw;
gate->reg = reg; gate->reg = reg;
gate->bit_idx = PCG_CGC_SHIFT; gate->bit_idx = PCG_CGC_SHIFT;
gate->lock = &imx_ccm_lock; gate->lock = &imx_ccm_lock;
} if (!mcore_booted)
gate_ops = &clk_gate_ops;
else
gate_ops = &imx8m_clk_composite_gate_ops;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, mux_ops, div_hw, mux_hw, mux_ops, div_hw,
divider_ops, gate_hw, &clk_gate_ops, flags); divider_ops, gate_hw, gate_ops, flags);
if (IS_ERR(hw)) if (IS_ERR(hw))
goto fail; goto free_gate;
return hw; return hw;
fail: free_gate:
kfree(gate); kfree(gate);
free_div:
kfree(div); kfree(div);
free_mux:
kfree(mux); kfree(mux);
return ERR_CAST(hw); return ERR_CAST(hw);
} }


@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
static void imx93_clk_composite_gate_disable(struct clk_hw *hw) static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{ {
/*
* Skip disable the root clock gate if mcore enabled.
* The root clock may be used by the mcore.
*/
if (mcore_booted)
return;
imx93_clk_composite_gate_endisable(hw, 0); imx93_clk_composite_gate_endisable(hw, 0);
} }
@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ro_ops, div_hw, mux_hw, &clk_mux_ro_ops, div_hw,
&clk_divider_ro_ops, NULL, NULL, flags); &clk_divider_ro_ops, NULL, NULL, flags);
} else if (!mcore_booted) { } else {
gate = kzalloc(sizeof(*gate), GFP_KERNEL); gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate) if (!gate)
goto fail; goto fail;
@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
&imx93_clk_composite_divider_ops, gate_hw, &imx93_clk_composite_divider_ops, gate_hw,
&imx93_clk_composite_gate_ops, &imx93_clk_composite_gate_ops,
flags | CLK_SET_RATE_NO_REPARENT); flags | CLK_SET_RATE_NO_REPARENT);
} else {
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &imx93_clk_composite_mux_ops, div_hw,
&imx93_clk_composite_divider_ops, NULL,
&imx93_clk_composite_gate_ops,
flags | CLK_SET_RATE_NO_REPARENT);
} }
if (IS_ERR(hw)) if (IS_ERR(hw))


@ -291,6 +291,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
if (val & POWERUP_MASK) if (val & POWERUP_MASK)
return 0; return 0;
if (pll->flags & CLK_FRACN_GPPLL_FRACN)
writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR),
pll->base + PLL_NUMERATOR);
val |= CLKMUX_BYPASS; val |= CLKMUX_BYPASS;
writel_relaxed(val, pll->base + PLL_CTRL); writel_relaxed(val, pll->base + PLL_CTRL);


@@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk);
clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk);
clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk);
clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);
imx_register_uart_clocks();
}


@ -146,6 +146,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
PDM_SEL, 2, 0 \ PDM_SEL, 2, 0 \
} }
#define CLK_GATE_PARENT(gname, cname, pname) \
{ \
gname"_cg", \
IMX8MP_CLK_AUDIOMIX_##cname, \
{ .fw_name = pname, .name = pname }, NULL, 1, \
CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \
1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \
}
struct clk_imx8mp_audiomix_sel { struct clk_imx8mp_audiomix_sel {
const char *name; const char *name;
int clkid; int clkid;
@ -163,14 +172,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("earc", EARC_IPG), CLK_GATE("earc", EARC_IPG),
CLK_GATE("ocrama", OCRAMA_IPG), CLK_GATE("ocrama", OCRAMA_IPG),
CLK_GATE("aud2htx", AUD2HTX_IPG), CLK_GATE("aud2htx", AUD2HTX_IPG),
CLK_GATE("earc_phy", EARC_PHY), CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT), CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT), CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT), CLK_GATE("spba2", SPBA2_ROOT),
CLK_GATE("dsp", DSP_ROOT), CLK_GATE("dsp", DSP_ROOT),
CLK_GATE("dspdbg", DSPDBG_ROOT), CLK_GATE("dspdbg", DSPDBG_ROOT),
CLK_GATE("edma", EDMA_ROOT), CLK_GATE("edma", EDMA_ROOT),
CLK_GATE("audpll", AUDPLL_ROOT), CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT), CLK_GATE("mu2", MU2_ROOT),
CLK_GATE("mu3", MU3_ROOT), CLK_GATE("mu3", MU3_ROOT),
CLK_PDM, CLK_PDM,


@ -551,8 +551,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1); hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000);
hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080);
hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100); hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100);
hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180); hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200); hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);


@ -165,8 +165,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER); imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL); imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL);
imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER); imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS); imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS);
imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
/* Audio SS */ /* Audio SS */
imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL);
@ -199,18 +199,18 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC); imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
/* Display controller SS */ /* Display controller SS */
imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS); imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS);
imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS); imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);
imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL);
imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS); imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS);
imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0);
imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1);
imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS); imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);
/* MIPI-LVDS SS */ /* MIPI-LVDS SS */


@ -1757,6 +1757,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
}; };
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops); EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
/**
* clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll
*
* @pll: clk alpha pll
* @regmap: register map
* @config: configuration to apply for pll
*/
void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
/*
* If the bootloader left the PLL enabled it's likely that there are
* RCGs that will lock up if we disable the PLL below.
*/
if (trion_pll_is_enabled(pll, regmap)) {
pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n");
return;
}
clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
config->config_ctl_val);
clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
config->config_ctl_hi_val);
clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll),
config->config_ctl_hi1_val);
clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
config->user_ctl_val);
clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
config->user_ctl_hi_val);
clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll),
config->user_ctl_hi1_val);
clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
config->test_ctl_val);
clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
config->test_ctl_hi_val);
clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll),
config->test_ctl_hi1_val);
/* Disable PLL output */
regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
/* Set operation mode to OFF */
regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
/* Place the PLL in STANDBY mode */
regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
}
EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure);
static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw) static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
{ {
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);


@ -198,6 +198,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config); const struct alpha_pll_config *config);
void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config); const struct alpha_pll_config *config);
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
