Merge tag 'android15-6.6.50_r00' into android15-6.6

This merges up to the 6.6.50 LTS release into android15-6.6.  Changes
included in here are:

* 71e124c708 ANDROID: db845c: add hwspin_lock_bus() to symbol list
* 8b26096a1e Revert "scsi: ufs: core: Check LSDBS cap when !mcq"
*   ffff4c673c Merge 6.6.50 into android15-6.6-lts
|\
| * ad07a29023 Linux 6.6.50
| * 9d24eaa217 i2c: Use IS_REACHABLE() for substituting empty ACPI functions
| * f5e9a22d19 virtio_net: Fix napi_skb_cache_put warning
| * d8915d2716 media: uvcvideo: Enforce alignment of frame and interval
| * e3a95f2964 drm/amd/display: Skip wbscl_set_scaler_filter if filter is null
| * e50bec62ac drm/amd/display: Check BIOS images before it is used
| * 67cf14c04f drm/amd/display: use preferred link settings for dp signal only
| * 800a5ab673 drm/amd/display: Correct the defined value for AMDGPU_DMUB_NOTIFICATION_MAX
| * 154a50bf42 drm/amd/display: added NULL check at start of dc_validate_stream
| * da696cbb47 drm/amd/display: Don't use fsleep for PSR exit waits on dmub replay
| * 381113ef01 drm/amdgpu: add lock in kfd_process_dequeue_from_device
| * ddfe95f2e1 drm/amdgpu: add lock in amdgpu_gart_invalidate_tlb
| * 494b42f35f drm/amdgpu: add skip_hw_access checks for sriov
| * 2e91ea2962 block: remove the blk_flush_integrity call in blk_integrity_unregister
| * 72f022ebb9 driver: iio: add missing checks on iio_info's callback access
| * 6290d3f588 f2fs: fix to do sanity check on blocks for inline_data inode
| * 862b19f0b1 wifi: cfg80211: make hash table duplicates more survivable
| * 63ca5b4670 hwmon: (k10temp) Check return value of amd_smn_read()
| * 20bf2920a8 dmaengine: altera-msgdma: properly free descriptor in msgdma_free_descriptor
| * cd3851ef3f dmaengine: altera-msgdma: use irq variant of spin_lock/unlock while invoking callbacks
| * 9d56712647 drm/bridge: tc358767: Check if fully initialized before signalling HPD event via IRQ
| * d9612c66af gfs2: Revert "Add quota_change type"
| * d93a2f86b0 crypto: stm32/cryp - call finalize with bh disabled
| * 68957f511b drm/meson: plane: Add error handling
| * ae9018e3f6 net/mlx5e: SHAMPO, Fix incorrect page release
| * 0b722b813c platform/chrome: cros_ec_lpc: MEC access can use an AML mutex
| * 4be9fd15c3 smack: tcp: ipv4, fix incorrect labeling
| * 4c1145144c regmap: spi: Fix potential off-by-one when calculating reserved size
| * 54a11ce4ff drm/amdgu: fix Unintentional integer overflow for mall size
| * 69f397e60c net: remove NULL-pointer net parameter in ip_metrics_convert
| * fc1b1e135c fsnotify: clear PARENT_WATCHED flags lazily
| * 3b9f2d9301 usb: typec: ucsi: Fix null pointer dereference in trace
| * 73ec94aac5 usbip: Don't submit special requests twice
| * f576acf752 media: v4l2-cci: Always assign *val
| * 37d9fd3134 rcu/nocb: Remove buggy bypass lock contention mitigation
| * f9a9cf96c3 pwm: xilinx: Fix u32 overflow issue in 32-bit width PWM mode.
| * 04e787f836 ionic: fix potential irq name truncation
| * fd867e74fa RDMA/efa: Properly handle unexpected AQ completions
| * 220725de5a soc: qcom: smem: Add qcom_smem_bust_hwspin_lock_by_host()
| * a6978d1b7b hwspinlock: Introduce hwspin_lock_bust()
| * e51077ad1b wifi: mac80211: check ieee80211_bss_info_change_notify() against MLD
| * 7cdb515855 PCI: al: Check IORESOURCE_BUS existence during probe
| * ea37096a6a cpufreq: scmi: Avoid overflow of target_freq in fast switch
| * 0bd1be7e84 wifi: iwlwifi: remove fw_running op
| * 0798e4330b drm/amdgpu: update type of buf size to u32 for eeprom functions
| * cdc65b5f99 drm/kfd: Correct pinned buffer handling at kfd restore and validate process
| * 1107129305 wifi: rtw89: ser: avoid multiple deinit on same CAM
| * 7e8d106ca9 drm/amd/pm: check negtive return for table entries
| * 7d265772e4 drm/amdgpu: the warning dereferencing obj for nbio_v7_4
| * 025798f44b drm/amd/pm: check specific index for smu13
| * d0230b3720 drm/amd/pm: check specific index for aldebaran
| * 4ab720b6aa drm/amdgpu: fix the waring dereferencing hive
| * 0aad97bf6d drm/amdgpu: fix dereference after null check
| * d116bb921e drm/amdgpu: Fix the warning division or modulo by zero
| * 58350786db drm/amdgpu/pm: Check input value for CUSTOM profile mode setting on legacy SOCs
| * eba7c58b7a wifi: ath11k: initialize 'ret' in ath11k_qmi_load_file_target_mem()
| * 17d89c7918 wifi: ath12k: initialize 'ret' in ath12k_qmi_load_file_target_mem()
| * 59f742e55a apparmor: fix possible NULL pointer dereference
| * f163ba83e6 drm/amdkfd: Reconcile the definition and use of oem_id in struct kfd_topology_device
| * d0a43bf367 drm/amdgpu: fix mc_data out-of-bounds read warning
| * f2b7a9f383 drm/amdgpu: fix ucode out-of-bounds read warning
| * f926797249 drm/amdgpu: Fix out-of-bounds read of df_v1_7_channel_number
| * e6ea3b8fe3 drm/amdkfd: Check debug trap enable before write dbg_ev_file
| * a60d1f7ff6 drm/amdgpu: Fix out-of-bounds write warning
| * f71ef2bb69 drm/amdgpu: Fix the uninitialized variable warning
| * 3ad44174a5 drm/amdgpu/pm: Fix uninitialized variable agc_btc_response
| * 3e04fa9707 drm/amdgpu/pm: Fix uninitialized variable warning for smu10
| * 97667de35b drm/amd/pm: fix uninitialized variable warnings for vangogh_ppt
| * 4dfec5f550 drm/amd/amdgpu: Check tbo resource pointer
| * 4003bac784 drm/amd/display: Fix index may exceed array range within fpu_update_bw_bounding_box
| * 8406158a54 drm/amd/display: Skip inactive planes within ModeSupportAndSystemConfiguration
| * 3dc6bb57da drm/amd/display: Ensure index calculation will not overflow
| * 94b0689984 drm/amd/display: Fix Coverity INTEGER_OVERFLOW within decide_fallback_link_setting_max_bw_policy
| * af43ed726f drm/amd/display: Spinlock before reading event
| * 313d3dd4ca drm/amd/display: Fix Coverity INTEGER_OVERFLOW within dal_gpio_service_create
| * 13faa9d401 drm/amd/display: Fix Coverity INTERGER_OVERFLOW within construct_integrated_info
| * cb63090a17 drm/amd/display: Check msg_id before processing transcation
| * 21f9cb44f8 drm/amd/display: Check num_valid_sets before accessing reader_wm_sets[]
| * 8b5ccf3d01 drm/amd/display: Add array index check for hdcp ddc access
| * 48e0b68e23 drm/amd/display: Check index for aux_rd_interval before using
| * 94cb77700f drm/amd/display: Stop amdgpu_dm initialize when stream nums greater than 6
| * 08e7755f75 drm/amd/display: Check gpio_id before used as array index
| * 30e60db429 drm/amdgpu: avoid reading vf2pf info size from FB
| * 1a2c89396d drm/amd/pm: fix uninitialized variable warnings for vega10_hwmgr
| * f1e261ced9 drm/amd/pm: fix the Out-of-bounds read warning
| * 4711b1347c drm/amd/pm: Fix negative array index read
| * 59dd0d4b33 drm/amd/pm: fix warning using uninitialized value of max_vid_step
| * 1dbce92f32 drm/amd/pm: fix uninitialized variable warning for smu8_hwmgr
| * fc0cb02efd drm/amd/pm: fix uninitialized variable warning
| * a2f2beaba7 drm/amdgpu/pm: Check the return value of smum_send_msg_to_smc
| * da22d1b98d drm/amdgpu: fix overflowed array index read warning
| * d2fe7ac613 drm/amd/display: Assign linear_pitch_alignment even for VM
| * 761964b756 drm/amdgpu: Fix uninitialized variable warning in amdgpu_afmt_acr
| * 3d2e1b8255 mptcp: pr_debug: add missing \n at the end
| * a088190f3f mptcp: avoid duplicated SUB_CLOSED events
| * 1f4ca105ab selftests: mptcp: join: stop transfer when check is done (part 2.2)
| * 5803af655f selftests: mptcp: join: disable get and dump addr checks
| * 81f2e73e73 selftests: mptcp: join: test for flush/re-add endpoints
| * f9ca09beed selftests: mptcp: join: check re-re-adding ID 0 signal
| * a417ef47a6 selftests: mptcp: join: validate event numbers
| * b66609e9aa selftests: mptcp: add mptcp_lib_events helper
| * 0a37a0ec0d selftests: mptcp: join: check re-adding init endp with != id
| * 43ca9a10d0 selftests: mptcp: join: check re-using ID of unused ADD_ADDR
| * a95e3e702c selftests: mptcp: add explicit test case for remove/readd
| * 8863e430e6 selftests: mptcp: join: cannot rm sf if closed
| * a17d141912 selftests: mptcp: declare event macros in mptcp_lib
| * 485bb1981a selftests: mptcp: userspace pm get addr tests
| * 1b8af4ba00 selftests: mptcp: dump userspace addrs list
| * 05867195c9 selftests: mptcp: userspace pm create id 0 subflow
| * 2a72ceb863 mptcp: pm: fix RM_ADDR ID for the initial subflow
| * 0229074a51 mptcp: make pm_remove_addrs_and_subflows static
| * 489f245e00 ASoC: codecs: ES8326: button detect issue
| * 2eb143e096 ASoC: amd: yc: Support mic on Lenovo Thinkpad E14 Gen 6
| * cc300463aa net: usb: qmi_wwan: add MeiG Smart SRM825L
| * 2ea1fab2df dma-debug: avoid deadlock between dma debug vs printk and netconsole
| * b1322bc32a i2c: Fix conditional for substituting empty ACPI functions
| * 8b28f8c5d1 spi: hisi-kunpeng: Add validation for the minimum value of speed_hz
| * 533e175c82 ASoC: amd: yc: Support mic on HP 14-em0002la
| * 77e5d743a8 smb: client: fix FSCTL_GET_REPARSE_POINT against NetApp
| * ad3bc43e72 net/mlx5: DR, Fix 'stack guard page was hit' error in dr_rule
| * f8b39e2da7 ALSA: seq: ump: Explicitly reset RPN with Null RPN
| * 9062e98473 ALSA: seq: ump: Transmit RPN/NRPN message at each MSB/LSB data reception
| * 7c34c68947 ALSA: seq: ump: Use the common RPN/bank conversion context
| * 4ed4e84939 ALSA: ump: Explicitly reset RPN with Null RPN
| * 5a494fdbfc ALSA: ump: Transmit RPN/NRPN message at each MSB/LSB data reception
| * bd819563d5 ALSA: hda/conexant: Mute speakers at suspend / shutdown
| * 8f11fbe119 ALSA: hda/generic: Add a helper to mute speakers at suspend/shutdown
| * 9b090ccd80 btrfs: tree-checker: validate dref root and objectid
| * 02c19d769d scsi: ufs: core: Bypass quick recovery if force reset is needed
| * 8d1af5c6af scsi: ufs: core: Check LSDBS cap when !mcq
| * 512bd0cd53 drm: panel-orientation-quirks: Add quirk for OrangePi Neo
| * d5618eaea8 drm/fb-helper: Don't schedule_work() to flush frame buffer during panic()
* | 5d5d11c197 Merge 6.6.49 into android15-6.6-lts
|\|
| * df1a7cc5e0 Linux 6.6.49
| * d60839a73a apparmor: fix policy_unpack_test on big endian systems
| * 8a3995a3ff scsi: aacraid: Fix double-free on probe failure
| * 790a8d36f9 arm64: dts: freescale: imx93-tqma9352-mba93xxla: fix typo
| * 7cf7de0f35 arm64: dts: freescale: imx93-tqma9352: fix CMA alloc-ranges
| * f8fb2cd486 arm64: dts: imx93: update default value for snps,clk-csr
| * d0c54c2949 arm64: dts: imx93: add nvmem property for eqos
| * 1e59301fcc arm64: dts: imx93: add nvmem property for fec1
| * 228d69f938 arm64: dts: imx8mp-beacon-kit: Fix Stereo Audio on WM8962
| * 8aaca1c9fb ARM: dts: omap3-n900: correct the accelerometer orientation
| * d54696813c usb: cdnsp: fix for Link TRB with TC
| * 72be846dc7 usb: cdnsp: fix incorrect index in cdnsp_get_hw_deq function
| * 8fc7c9dcfe usb: core: sysfs: Unmerge @usb3_hardware_lpm_attr_group in remove_power_attributes()
| * 70b43c3366 usb: dwc3: st: add missing depopulate in probe error path
| * e1e5e8ea27 usb: dwc3: st: fix probed platform device ref count on probe error path
| * 7bb11a75dd usb: dwc3: core: Prevent USB core invalid event buffer address access
| * df2daed529 usb: dwc3: omap: add missing depopulate in probe error path
| * b3e8696e03 ARM: dts: imx6dl-yapp43: Increase LED current to match the yapp4 HW design
| * 6d9f478386 USB: serial: option: add MeiG Smart SRM825L
| * 03e3156ea7 scsi: sd: Ignore command SYNCHRONIZE CACHE error if format in progress
| * cdf7efe4b0 firmware: qcom: scm: Mark get_wq_ctx() as atomic call
| * ea3b6ec38b cdc-acm: Add DISABLE_ECHO quirk for GE HealthCare UI Controller
| * 1efdbf5323 soc: qcom: pmic_glink: Fix race during initialization
| * c704091b59 soc: qcom: pmic_glink: Actually communicate when remote goes down
| * ef80520be0 soc: qcom: cmd-db: Map shared memory as WC, not WB
| * 56ad559cf6 nfc: pn533: Add poll mod list filling check
| * 55526afdbb net: busy-poll: use ktime_get_ns() instead of local_clock()
| * f6f5e39a3f drm/amd/display: avoid using null object of framebuffer
| * b65116bee7 sctp: fix association labeling in the duplicate COOKIE-ECHO case
| * 28c67f0f84 gtp: fix a potential NULL pointer dereference
| * 6b59806916 bonding: change ipsec_lock from spin lock to mutex
| * 1979275833 bonding: extract the use of real_device into local variable
| * 124a688ed3 bonding: implement xdo_dev_state_free and call it after deletion
| * 7aa9f978c2 selftests: forwarding: local_termination: Down ports on cleanup
| * 7e7d0bd538 selftests: forwarding: no_forwarding: Down ports on cleanup
| * 84f17718a0 netfilter: nf_tables_ipv6: consider network offset in netdev/egress validation
| * 94ab317024 ethtool: check device is present when getting link settings
| * 1a525d99dc wifi: iwlwifi: fw: fix wgds rev 3 exact size
| * 8e51088d91 netfilter: nf_tables: restore IP sanity checks for netdev/egress
| * 99912d85fa iommu: Do not return 0 from map_pages if it doesn't do anything
| * c8525821ec Bluetooth: hci_core: Fix not handling hibernation actions
| * 662a55986b Bluetooth: btnxpuart: Fix random crash seen while removing driver
| * 9aa6e15c01 Bluetooth: btnxpuart: Handle FW Download Abort scenario
| * 9d5df94ce0 Bluetooth: btnxpuart: Resolve TX timeout error in power save stress test
| * 7df36cceb9 dmaengine: dw: Add memory bus width verification
| * e1b63d054e dmaengine: dw: Add peripheral bus width verification
| * 24775bfdf4 phy: xilinx: phy-zynqmp: Fix SGMII linkup failure on resume
| * ffd2e852dc dmaengine: dw-edma: Do not enable watermark interrupts for HDMA
| * d3c9a2a6ca dmaengine: dw-edma: Fix unmasking STOP and ABORT interrupts for HDMA
| * b47ff45c4a soundwire: stream: fix programming slave ports for non-continous port maps
| * 954e1893fe phy: fsl-imx8mq-usb: fix tuning parameter name
| * b547cab1eb iommufd: Do not allow creating areas without READ or WRITE
| * 459584258d selinux,smack: don't bypass permissions check in inode_setsecctx hook
| * 05d2e16a9e Revert "change alloc_pages name in dma_map_ops to avoid name conflicts"
| * 288d398df8 cifs: Fix FALLOC_FL_PUNCH_HOLE support
| * 881aee27ce mm: Fix missing folio invalidation calls during truncation
| * 9de10f5b7b ovl: ovl_parse_param_lowerdir: Add missed '\n' for pr_err
| * dce7cbeaa1 ovl: fix wrong lowerdir number check for parameter Opt_lowerdir
| * 0e1c9709d7 ovl: pass string to ovl_parse_layer()
| * c15123bbe3 pinctrl: starfive: jh7110: Correct the level trigger configuration of iev register
| * 2ebdb6e987 pinctrl: mediatek: common-v2: Fix broken bias-disable for PULL_PU_PD_RSEL_TYPE
| * 805cb277fb ASoC: SOF: amd: Fix for acp init sequence
| * 30464c3174 ASoC: amd: acp: fix module autoloading
| * 2dfbf8991e thermal: of: Fix OF node leak in of_thermal_zone_find() error paths
| * 31019a2ab4 thermal: of: Fix OF node leak in thermal_of_trips_init() error path
| * 0199a29ec6 of: Introduce for_each_*_child_of_node_scoped() to automate of_node_put() handling
| * d967f6ae31 usb: typec: fix up incorrectly backported "usb: typec: tcpm: unregister existing source caps before re-registration"
| * 4ed03758dd tracing: Have format file honor EVENT_FILE_FL_FREED
| * 9a9716bbbf drm/vmwgfx: Fix prime with external buffers
| * 39defab0eb drm/amdgpu/swsmu: always force a state reprogram on init
| * 11182b33fa drm/amdgpu: align pp_power_profile_mode with kernel docs
| * 09c423d6fc selftests: mptcp: join: check re-re-adding ID 0 endp
| * 99c17b3be7 selftests: mptcp: join: no extra msg if no counter
| * a81c87ac60 selftests: mptcp: join: check removing ID 0 endpoint
| * dc14d542e6 mptcp: pm: ADD_ADDR 0 is not a new address
| * 53e2173172 mptcp: pm: fix ID 0 endp usage after multiple re-creations
| * 78b0414986 mptcp: pm: do not remove already closed subflows
| * edfbc14a4b mptcp: pm: send ACK on an active subflow
| * 26e0f27405 mptcp: pm: reset MPC endp ID when re-added
| * 6d6c145633 mptcp: pm: skip connecting to already established sf
| * 1448d9a34c mptcp: pm: reuse ID 0 after delete and re-add
| * 9e40cd7959 mptcp: sched: check both backup in retrans
| * 255bc4fc4f mptcp: close subflow when receiving TCP+FIN
| * c04cac9a59 net: mana: Fix race of mana_hwc_post_rx_wqe and new hwc response
| * 8e4084ed2b wifi: mwifiex: duplicate static structs used in driver instances
| * 9d5e5908f0 wifi: wfx: repair open network AP mode
| * cb739d3ce5 of: Add cleanup.h based auto release via __free(device_node) markings
| * 4ed45fe99e pinctrl: single: fix potential NULL dereference in pcs_get_function()
| * d80bdfaa48 pinctrl: rockchip: correct RK3328 iomux width flag for GPIO2-B pins
| * a01859dd6a smb/client: avoid dereferencing rdata=NULL in smb2_new_read_req()
| * 4401326066 btrfs: run delayed iputs when flushing delalloc
| * 51722b99f4 btrfs: fix a use-after-free when hitting errors inside btrfs_submit_chunk()
| * f6758eb792 LoongArch: Remove the unused dma-direct.h
| * b1922c3102 ALSA: seq: Skip event type filtering for UMP events
* | 15dc13fed9 UPSTREAM: Revert "change alloc_pages name in dma_map_ops to avoid name conflicts"
* | 5d953d23a5 ANDROID: add two drm symbols to db845c list
* | 387ae83039 ANDROID: db845c: add drm_rect_rotate_inv to symbol list
* | 9006a88086 Merge 6.6.48 into android15-6.6-lts
|\|
| * c77dee530e Linux 6.6.48
| * a2081b8cab tools: move alignment-related macros to new <linux/align.h>
| * 8f04edd554 Input: MT - limit max slots
| * 3d68d10760 Revert "s390/dasd: Establish DMA alignment"
| * b8d7b897e1 net: ngbe: Fix phy mode set to external phy
| * 118fd99761 ksmbd: fix race condition between destroy_previous_session() and smb2 operations()
| * c6372cbd91 drm/amdgpu/vcn: not pause dpg for unified queue
| * 44bb8f18a6 drm/amdgpu/vcn: identify unified queue in sw init
| * e0aeb26b04 NFSD: simplify error paths in nfsd_svc()
| * b12caa8f08 selftests/bpf: Add a test to verify previous stacksafe() fix
| * 7cad3174cc bpf: Fix a kernel verifier crash in stacksafe()
| * 19b4397c4a mm/numa: no task_numa_fault() call if PTE is changed
| * c789a78151 mm/numa: no task_numa_fault() call if PMD is changed
| * bb121128fd ALSA: timer: Relax start tick time check for slave timer elements
| * 1f3b52c0fd igc: Fix qbv tx latency by setting gtxoffset
| * ad569ac605 drm/panel: nt36523: Set 120Hz fps for xiaomi,elish panels
| * ab7554fb51 drm/msm/mdss: specify cfg bandwidth for SDM670
| * cc9006d00d hwmon: (ltc2992) Fix memory leak in ltc2992_parse_dt()
| * f0974e6bc3 tcp: do not export tcp_twsk_purge()
| * 3d3a8654a5 platform/x86/intel/ifs: Call release_firmware() when handling errors.
| * 50fe8565a9 Revert "drm/amd/display: Validate hw_points_num before using it"
| * 8e1f64d446 Revert "usb: gadget: uvc: cleanup request when not in correct state"
| * ce2f28a549 selftests: mptcp: join: check re-using ID of closed subflow
| * f845af67e7 selftests: mptcp: join: validate fullmesh endp on 1st sf
| * 9a9afbbc3f mptcp: pm: avoid possible UaF when selecting endp
| * b762e1e301 mptcp: pm: fullmesh: select the right ID later
| * a7a692b791 mptcp: pm: only in-kernel cannot have entries with ID 0
| * 9907af6a28 mptcp: pm: check add_addr_accept_max before accepting new ADD_ADDR
| * d20bf2c96d mptcp: pm: only decrement add_addr_accepted for MPJ req
| * 43cf912b0b mptcp: pm: only mark 'subflow' endp as available
| * 01db518468 mptcp: pm: remove mptcp_pm_remove_subflow()
| * 257d56fbff mptcp: pm: re-using ID of unused flushed subflows
| * 7b64bdbfcf mptcp: pm: re-using ID of unused removed subflows
| * cb24bdcdec mptcp: pm: re-using ID of unused removed ADD_ADDR
| * cc29c5546c nouveau/firmware: use dma non-coherent allocator
| * d25fd6eb26 pmdomain: imx: wait SSAR when i.MX93 power domain on
| * e6d49dd41e pmdomain: imx: scu-pd: Remove duplicated clocks
| * 7bbf8f8b88 mmc: dw_mmc: allow biu and ciu clocks to defer
| * c1b17191ab mmc: mtk-sd: receive cmd8 data when hs400 tuning fail
| * 9d7629bec5 KVM: arm64: Make ICC_*SGI*_EL1 undef in the absence of a vGICv3
| * 2d7cb80bff cxgb4: add forgotten u64 ivlan cast before shift
| * 11343059da Input: i8042 - use new forcenorestore quirk to replace old buggy quirk combination
| * fdda14aba7 Input: i8042 - add forcenorestore quirk to leave controller untouched even on s3
| * 2b5f22ea55 HID: wacom: Defer calculation of resolution until resolution_code is known
| * ac5d3baf14 MIPS: Loongson64: Set timer mode in cpu-probe
| * 242665bd91 net: dsa: microchip: fix PTP config failure when using multiple ports
| * 50553ea7cb drm/amdgpu: Validate TA binary size
| * 09982d418a ksmbd: the buffer of smb2 query dir response has at least 1 byte
| * 76df3a1970 scsi: core: Fix the return value of scsi_logical_block_count()
| * 064dd929c7 Bluetooth: MGMT: Add error handling to pair_device()
| * 0fa4b4aadb smb: client: ignore unhandled reparse tags
| * cac2815f49 mmc: mmc_test: Fix NULL dereference on allocation failure
| * 88c232fd06 drm/msm: fix the highest_bank_bit for sc7180
| * aba7569333 drm/msm/mdss: Handle the reg bus ICC path
| * 707601fcf6 drm/msm/mdss: Rename path references to mdp_path
| * 9611899f42 drm/msm/mdss: switch mdss to use devm_of_icc_get()
| * 8b93b3e158 drm/msm/dpu: take plane rotation into account for wide planes
| * 1af8f3db50 drm/msm/dpu: try multirect based on mdp clock limits
| * a3c5815b07 drm/msm/dpu: cleanup FB if dpu_format_populate_layout fails
| * 6f3aea37aa drm/msm/dp: reset the link phy params before link training
| * 3fb61718bc drm/msm/dpu: move dpu_encoder's connector assignment to atomic_enable()
| * 8e7ef27e4e drm/msm/dpu: capture snapshot on the first commit_done timeout
| * 801f49c83b drm/msm/dpu: split dpu_encoder_wait_for_event into two functions
| * da7243e522 drm/msm/dpu: drop MSM_ENC_VBLANK support
| * e86721b0d0 drm/msm/dpu: use drmm-managed allocation for dpu_encoder_phys
| * 4a2798cc24 drm/msm/dp: fix the max supported bpp logic
| * 69da87fb0d drm/msm/dpu: don't play tricks with debug macros
| * 6be50c8991 net: ovs: fix ovs_drop_reasons error
| * 4bf322e5af net: xilinx: axienet: Fix dangling multicast addresses
| * 14ebcb4a67 net: xilinx: axienet: Always disable promiscuous mode
| * 7a8c74df7c octeontx2-af: Fix CPT AF register offset calculation
| * 0279c35d24 netfilter: flowtable: validate vlan header
| * fa4e6ae385 bnxt_en: Fix double DMA unmapping for XDP_REDIRECT
| * 124b428fe2 ipv6: prevent possible UAF in ip6_xmit()
| * 6ab6bf7313 ipv6: fix possible UAF in ip6_finish_output2()
| * af1dde074e ipv6: prevent UAF in ip6_send_skb()
| * cfa692e7e7 udp: fix receiving fraglist GSO packets
| * 577d6c0619 netem: fix return value if duplicate enqueue fails
| * f7d8c2fabd net: dsa: mv88e6xxx: Fix out-of-bound access
| * 8ea80ff5d8 igb: cope with large MAX_SKB_FRAGS
| * da7d3fddce dpaa2-switch: Fix error checking in dpaa2_switch_seed_bp()
| * c864bf0166 ice: fix truesize operations for PAGE_SIZE >= 8192
| * e9a1df12d5 ice: fix ICE_LAST_OFFSET formula
| * 5898525275 ice: fix page reuse when PAGE_SIZE is over 8k
| * 996ba2f0d7 bonding: fix xfrm state handling when clearing active slave
| * 4582d4ff41 bonding: fix xfrm real_dev null pointer dereference
| * 0707260a18 bonding: fix null pointer deref in bond_ipsec_offload_ok
| * f2b3d38d79 bonding: fix bond_ipsec_offload_ok return type
| * 467ee0d4c5 ip6_tunnel: Fix broken GRO
| * 9a131a5099 netfilter: nft_counter: Synchronize nft_counter_reset() against reader.
| * b1ac83483e netfilter: nft_counter: Disable BH in nft_counter_offload_stats().
| * 00425508f3 kcm: Serialise kcm_sendmsg() for the same socket.
| * ed37ac430c net: mctp: test: Use correct skb for route input check
| * 99580ae890 tcp: prevent concurrent execution of tcp_sk_exit_batch
| * 7348061662 tcp/dccp: do not care about families in inet_twsk_purge()
| * 9624febd69 tcp/dccp: bypass empty buckets in inet_twsk_purge()
| * 0322502538 selftests: udpgro: report error when receive failed
| * 440efd86cd tc-testing: don't access non-existent variable on exception
| * e83b49ecb5 net: mscc: ocelot: serialize access to the injection/extraction groups
| * ff7f554bbd net: mscc: ocelot: fix QoS class for injected packets with "ocelot-8021q"
| * dd17e1e682 net: mscc: ocelot: use ocelot_xmit_get_vlan_info() also for FDMA and register injection
| * 4177f2b0a3 Bluetooth: SMP: Fix assumption of Central always being Initiator
| * 50ce491164 Bluetooth: hci_core: Fix LE quote calculation
| * 9b707444be drm/amdkfd: reserve the BO before validating it
| * 15e3bbd83b ALSA: hda/tas2781: Use correct endian conversion
| * 9dcb933a16 platform/surface: aggregator: Fix warning when controller is destroyed in probe
| * 114858d713 drm/amd/amdgpu: command submission parser for JPEG
| * a50a25dc0c drm/amd/display: fix cursor offset on rotation 180
| * 6490f063d5 drm/amd/display: Enable otg synchronization logic for DCN321
| * d15fc910b6 drm/amd/display: Adjust cursor position
| * f0e8658790 btrfs: send: allow cloning non-aligned extent if it ends at i_size
| * 1bca9776ed btrfs: replace sb::s_blocksize by fs_info::sectorsize
| * de7bad8634 mm/vmalloc: fix page mapping if vm_area_alloc_pages() with high order fallback to order 0
| * 983e6b2636 change alloc_pages name in dma_map_ops to avoid name conflicts
| * f2ce57463d selftests: memfd_secret: don't build memfd_secret test on unsupported arches
| * 7b0e822d65 selftests/mm: log run_vmtests.sh results in TAP format
| * b4426da8c1 tools/testing/selftests/mm/run_vmtests.sh: lower the ptrace permissions
| * b77471c676 mm: fix endless reclaim on machines with unaccepted memory
| * 00b395e95a dm suspend: return -ERESTARTSYS instead of -EINTR
| * 84557cd611 riscv: entry: always initialize regs->a0 to -ENOSYS
| * e84f4400bf i2c: stm32f7: Add atomic_xfer method to driver
| * 2ff51719ec jfs: define xtree root and page independently
| * 34ba4f29f3 gtp: pull network headers in gtp_dev_xmit()
| * 9c375a9566 nvme: fix namespace removal list
| * 6019283e1e EDAC/skx_common: Allow decoding of SGX addresses
| * 81bd4b07a4 ionic: check cmd_regs before copying in or out
| * 1ae3ff27c6 ionic: use pci_is_enabled not open code
| * a855d12979 hrtimer: Prevent queuing of hrtimer without a function callback
| * 3cc03d1dbd drm/amdgpu: fix dereference null return value for the function amdgpu_vm_pt_parent
| * 82f20194bf nvme: use srcu for iterating namespace list
| * 913c30f827 Revert "bpf, sockmap: Prevent lock inversion deadlock in map delete elem"
| * 1b2631dd54 selftests/bpf: Fix a few tests for GCC related warnings.
| * 73c50bd104 nvmet-rdma: fix possible bad dereference when freeing rsps
| * e9c0aa6c3d ext4: set the type of max_zeroout to unsigned int to avoid overflow
| * 4ca547488d irqchip/gic-v3-its: Remove BUG_ON in its_vpe_irq_domain_alloc
| * def4422ff0 usb: dwc3: core: Skip setting event buffers for host only controllers
| * c0076d2c8d platform/x86: lg-laptop: fix %s null argument warning
| * 14bd62d580 clocksource: Make watchdog and suspend-timing multiplication overflow safe
| * 831420f210 irqchip/renesas-rzg2l: Do not set TIEN and TINT source at the same time
| * d8d4da5c68 s390/iucv: fix receive buffer virtual vs physical address confusion
| * 7ad21517c3 openrisc: Call setup_memory() earlier in the init sequence
| * fbc63fb165 NFS: avoid infinite loop in pnfs_update_layout.
| * 4ff710fdf7 nvmet-tcp: do not continue for invalid icreq
| * be285b8dd2 net: hns3: add checking for vf id of mailbox
| * 454ba1740c rtc: nct3018y: fix possible NULL dereference
| * 664ad87c36 firmware: cirrus: cs_dsp: Initialize debugfs_root to invalid
| * 7ff15407c4 Bluetooth: bnep: Fix out-of-bound access
| * 5469f609be nvme: clear caller pointer on identify failure
| * 5419f3001e usb: gadget: fsl: Increase size of name buffer for endpoints
| * 428fb40bd9 f2fs: fix to do sanity check in update_sit_entry
| * b9b019acfb btrfs: delete pointless BUG_ON check on quota root in btrfs_qgroup_account_extent()
| * c7e0e8acc5 btrfs: change BUG_ON to assertion in tree_move_down()
| * 48256173f2 btrfs: send: handle unexpected inode in header process_recorded_refs()
| * 4eb8be942e btrfs: send: handle unexpected data in header buffer in begin_cmd()
| * 46ca3ec9b8 btrfs: handle invalid root reference found in may_destroy_subvol()
| * 6be930556d btrfs: push errors up from add_async_extent()
| * 981a749cef btrfs: tests: allocate dummy fs_info and root in test_find_delalloc()
| * f379c3f49e btrfs: change BUG_ON to assertion when checking for delayed_node root
| * 315471004b btrfs: defrag: change BUG_ON to assertion in btrfs_defrag_leaves()
| * a7fec145b0 btrfs: delayed-inode: drop pointless BUG_ON in __btrfs_remove_delayed_item()
| * 97db7b5987 powerpc/boot: Only free if realloc() succeeds
| * 9c96b5b056 powerpc/boot: Handle allocation failure in simple_realloc()
| * 881613a975 f2fs: stop checkpoint when get a out-of-bounds segment
| * 23494bccd2 rxrpc: Don't pick values out of the wire header when setting up security
| * cbdac8249f parisc: Use irq_enter_rcu() to fix warning at kernel/context_tracking.c:367
| * 0f7acd8cef memory: stm32-fmc2-ebi: check regmap_read return value
| * 1a45994fb2 x86: Increase brk randomness entropy for 64-bit systems
| * 60563755c9 md: clean up invalid BUG_ON in md_ioctl
| * 9cf3b89b4f netlink: hold nlk->cb_mutex longer in __netlink_dump_start()
| * 0e07c0c84a tick: Move got_idle_tick away from common flags
| * 6d4fbad0ef clocksource/drivers/arm_global_timer: Guard against division by zero
| * 533893c2e0 accel/habanalabs: fix debugfs files permissions
| * 0b9f748da2 virtiofs: forbid newlines in tags
| * a48d12797e hrtimer: Select housekeeping CPU during migration
| * 53b2f35399 gpio: sysfs: extend the critical section for unregistering sysfs devices
| * 932490268c drm/lima: set gp bus_stop bit before hard reset
| * aeecb08b64 net/sun3_82586: Avoid reading past buffer in debug output
| * 6d3ff0437e wifi: iwlwifi: mvm: avoid garbage iPN
| * 2f50c1ea7f media: drivers/media/dvb-core: copy user arrays safely
| * 12b5b95908 scsi: lpfc: Initialize status local variable in lpfc_sli4_repost_sgl_list()
| * 342352caf0 fs: binfmt_elf_efpic: don't use missing interpreter's properties
| * b1397fb4a7 media: pci: cx23885: check cx23885_vdev_init() return
| * 1fb112cefa kernfs: fix false-positive WARN(nr_mmapped) in kernfs_drain_open_files
| * d5a9588cc3 riscv: blacklist assembly symbols for kprobe
| * 18b9264a13 quota: Remove BUG_ON from dqget()
| * 30bbdff9e1 wifi: ath12k: Add missing qmi_txn_cancel() calls
| * 535e9bd0e8 fuse: fix UAF in rcu pathwalks
| * e968edf6ec afs: fix __afs_break_callback() / afs_drop_open_mmap() race
| * 56a1bf2b83 btrfs: zlib: fix and simplify the inline extent decompression
| * 0a56dcce6b ext4: do not trim the group with corrupted block bitmap
| * 5b7766ab91 nvmet-trace: avoid dereferencing pointer too early
| * 9b139b16b5 EDAC/skx_common: Filter out the invalid address
| * 73567149be gfs2: Refcounting fix in gfs2_thaw_super
| * 3cde81f8ad Bluetooth: hci_conn: Check non NULL function before calling for HFP offload
| * 7a3e7f1ed6 evm: don't copy up 'security.evm' xattr
| * e9c902dd36 drm/rockchip: vop2: clear afbc en and transform bit for cluster window at linear mode
| * 9324cbc465 ionic: no fw read when PCI reset failed
| * b41d5ce6b2 ionic: prevent pci disable of already disabled device
| * fb768e0571 powerpc/pseries/papr-sysparm: Validate buffer object lengths
| * cbd7ab7d7e hwmon: (pc87360) Bounds check data->innr usage
| * 61db7910e8 ASoC: SOF: ipc4: check return value of snd_sof_ipc_msg_data
| * 9badede607 powerpc/xics: Check return value of kasprintf in icp_native_map_one_cpu
| * dff1afeaea memory: tegra: Skip SID programming if SID registers aren't set
| * 9064a70eee drm/msm: Reduce fallout of fence signaling vs reclaim hangs
| * edb39f621b block: Fix lockdep warning in blk_mq_mark_tag_wait
| * c58f777045 arm64: Fix KASAN random tag seed initialization
| * 8555383730 powerpc/topology: Check if a core is online
| * f17c3a37b2 cpu/SMT: Enable SMT only if a core is online
| * 881eb2fae1 rust: fix the default format for CONFIG_{RUSTC,BINDGEN}_VERSION_TEXT
| * b4dad0cab6 rust: suppress error messages from CONFIG_{RUSTC,BINDGEN}_VERSION_TEXT
| * 34e1335905 rust: work around `bindgen` 0.69.0 issue
| * 7d805d9255 hwmon: (ltc2992) Avoid division by zero
| * 67288cbb7a IB/hfi1: Fix potential deadlock on &irq_src_lock and &dd->uctxt_lock
| * c357e4cf58 clk: visconti: Add bounds-checking coverage for struct visconti_pll_provider
| * 3081947105 wifi: iwlwifi: check for kmemdup() return value in iwl_parse_tlv_firmware()
| * 0a1a3c11fc wifi: iwlwifi: fw: Fix debugfs command sending
| * 1cbdaf141a wifi: iwlwifi: abort scan when rfkill on but device enabled
| * 355aec2ed8 gfs2: setattr_chown: Add missing initialization
| * 1a327862eb wifi: mac80211: flush STA queues on unauthorization
| * e9f6100503 scsi: spi: Fix sshdr use
| * 3b2f36068c ASoC: SOF: Intel: hda-dsp: Make sure that no irq handler is pending before suspend
| * 66ddb97699 iommu/arm-smmu-qcom: Add SDM670 MDSS compatible
| * 9649d26dc9 media: qcom: venus: fix incorrect return value
| * abbba0768c drm/tegra: Zero-initialize iosys_map
| * 5ee7df8143 binfmt_misc: cleanup on filesystem umount
| * ebf6f517d3 md/raid5-cache: use READ_ONCE/WRITE_ONCE for 'conf->log'
| * 03be3489b1 accel/habanalabs: fix bug in timestamp interrupt handling
| * db5ba2c1ed accel/habanalabs: export dma-buf only if size/offset multiples of PAGE_SIZE
| * fa8cb3102f accel/habanalabs/gaudi2: unsecure tpc count registers
| * e6571cff84 media: s5p-mfc: Fix potential deadlock on condlock
| * 14dde93c31 platform/x86/intel/ifs: Validate image size
| * 8464e99e38 staging: ks7010: disable bh on tx_dev_lock
| * a8b30d53a1 drm/amd/display: Validate hw_points_num before using it
| * 1b60d354a4 usb: gadget: uvc: cleanup request when not in correct state
| * d2defcddfe wifi: mt76: fix race condition related to checking tx queue fill status
| * b87691d733 staging: iio: resolver: ad2s1210: fix use before initialization
| * 84d6173880 wifi: ath11k: fix ath11k_mac_op_remain_on_channel() stack usage
| * 55e9057c8a media: radio-isa: use dev_name to fill in bus_info
| * 14fafdfdad drm/amdkfd: Move dma unmapping after TLB flush
| * 7620f9c3bd i3c: mipi-i3c-hci: Do not unmap region not mapped for transfer
| * 6c85c7c9a8 i3c: mipi-i3c-hci: Remove BUG() when Ring Abort request times out
| * 98ed3f40f2 wifi: ath12k: fix WARN_ON during ath12k_mac_update_vif_chan
| * 10b1f85254 drm/bridge: tc358768: Attempt to fix DSI horizontal timings
| * 8653d7bddf s390/smp,mcck: fix early IPI handling
| * 0b9c00d9fa RDMA/rtrs: Fix the problem of variable not initialized fully
| * 75a92689e3 i2c: riic: avoid potential division by zero
| * a2225b7af5 cgroup: Avoid extra dereference in css_populate_dir()
| * a4dc742456 wifi: cw1200: Avoid processing an invalid TIM IE
| * 97f3817039 sched/topology: Handle NUMA_NO_NODE in sched_numa_find_nth_cpu()
| * 7ede6ef04c net: ethernet: mtk_wed: check update_wo_rx_stats in mtk_wed_update_rx_stats()
| * 81ba4dd37a rcu: Eliminate rcu_gp_slow_unregister() false positive
| * e160de344f rcu: Dump memory object info if callback function is invalid
| * 4a2f094601 mm: Remove kmem_valid_obj()
| * ee6669b463 wifi: iwlwifi: mvm: fix recovery flow in CSA
| * 86f22e7cce wifi: mac80211: fix BA session teardown race
| * 268f84a827 wifi: cfg80211: check wiphy mutex is held for wdev mutex
| * f34056c305 wifi: mac80211: lock wiphy in IP address notifier
| * 5a002f41eb ASoC: cs35l45: Checks index of cs35l45_irqs[]
| * 8574cdabb5 ssb: Fix division by zero issue in ssb_calc_clock_rate
| * ec71cc24b0 drm/amdgpu: access RLC_SPM_MC_CNTL through MMIO in SRIOV runtime
| * 695f692bcd drm/amd/amdgpu/imu_v11_0: Increase buffer size to ensure all possible values can be stored
| * 51e4630ef0 drm/amd/pm: fix error flow in sensor fetching
| * c3254bc09f ALSA: hda/realtek: Fix noise from speakers on Lenovo IdeaPad 3 15IAU7
| * a8544dec14 gpio: mlxbf3: Support shutdown() function
| * fa1d4de726 net: hns3: fix a deadlock problem when config TC during resetting
| * 1d2f4a7316 net: hns3: use the user's cfg after reset
| * a6c0178c81 net: hns3: fix wrong use of semaphore up
| * d4b8c4bb77 selftests: net: lib: kill PIDs before del netns
| * fc9cae2199 selftests: net: lib: ignore possible errors
| * 921f1acf0c vsock: fix recursive ->recvmsg calls
| * f123293db1 netfilter: nf_tables: Add locking for NFT_MSG_GETOBJ_RESET requests
| * 4340de3e65 netfilter: nf_tables: Introduce nf_tables_getobj_single
| * cf4ebf7797 netfilter: nf_tables: Carry reset boolean in nft_obj_dump_ctx
| * f3b7dc8b6c netfilter: nf_tables: nft_obj_filter fits into cb->ctx
| * 959c9bf85f netfilter: nf_tables: Carry s_idx in nft_obj_dump_ctx
| * 2c6a79b94e netfilter: nf_tables: A better name for nft_obj_filter
| * ba9b99d909 netfilter: nf_tables: Unconditionally allocate nft_obj_filter
| * 83d37714c1 netfilter: nf_tables: Drop pointless memset in nf_tables_dump_obj
| * bb6231e533 netfilter: nf_tables: Audit log dump reset after the fact
| * 74e6eb7fd2 netfilter: nf_queue: drop packets with cloned unconfirmed conntracks
| * c7b760499f netfilter: flowtable: initialise extack before use
| * 7b825f91a0 netfilter: allow ipv6 fragments to arrive on different devices
| * 227355ad4e tcp: Update window clamping condition
| * ff8292bb10 mptcp: correct MPTCP_SUBFLOW_ATTR_SSN_OFFSET reserved size
| * c920ab13d7 mlxbf_gige: disable RX filters until RX path initialized
| * 326a89321f net: ethernet: mtk_wed: fix use-after-free panic in mtk_wed_setup_tc_block_cb()
| * b3917d8a4e net: dsa: vsc73xx: check busy flag in MDIO operations
| * cec515531e net: dsa: vsc73xx: use read_poll_timeout instead delay loop
| * e077f51928 net: dsa: vsc73xx: pass value in phy_write operation
| * 948ee178f4 net: axienet: Fix register defines comment description
| * ef23c18ab8 atm: idt77252: prevent use after free in dequeue_rx()
| * 6c88d53bcf net/mlx5e: Correctly report errors for ethtool rx flows
| * b3b9a87ade net/mlx5e: Take state lock during tx timeout reporter
| * 920dff7f43 igc: Fix reset adapter logics when tx mode change
| * 01ad5058b8 igc: Fix qbv_config_change_errors logics
| * 3fa593db45 igc: Fix packet still tx after gate close by reducing i226 MAC retry buffer
| * 944f2d4db9 bpf: Fix updating attached freplace prog in prog_array map
| * 730f7a5e44 s390/uv: Panic for set and remove shared access UVC errors
| * 0971767873 drm/amdgpu/jpeg4: properly set atomics vmid field
| * 0e93fa4027 drm/amdgpu/jpeg2: properly set atomics vmid field
| * f1aa7c509a memcg_write_event_control(): fix a user-triggerable oops
| * 2685a2b9e5 drm/amdgpu: Actually check flags for all context ops.
| * d3ba98ce5c btrfs: tree-checker: add dev extent item checks
| * 9baca56eb0 btrfs: zoned: properly take lock to read/update block group's zoned variables
| * 251508b933 btrfs: tree-checker: reject BTRFS_FT_UNKNOWN dir type
| * f7668d0339 mm/memory-failure: use raw_spinlock_t in struct memory_failure_cpu
| * 5295951b53 selinux: add the processing of the failure of avc_add_xperms_decision()
| * 01a6b34b60 selinux: fix potential counting error in avc_add_xperms_decision()
| * dfaa39b05a fs/netfs/fscache_cookie: add missing "n_accesses" check
| * 4291f94f8c wifi: brcmfmac: cfg80211: Handle SSID based pmksa deletion
| * 221cf83217 net: mana: Fix doorbell out of order violation and avoid unnecessary doorbell rings
| * 65f20b174e net: mana: Fix RX buf alloc_size alignment and atomic op panic
| * 753f174514 rtla/osnoise: Prevent NULL dereference in error handling
| * 67d1d8cc59 i2c: qcom-geni: Add missing geni_icc_disable in geni_i2c_runtime_resume
| * dd72ae8b0f fix bitmap corruption on close_range() with CLOSE_RANGE_UNSHARE
| * 97a532c3ac bitmap: introduce generic optimized bitmap_size()
| * ef725854f8 btrfs: rename bitmap_set_bits() -> btrfs_bitmap_set_bits()
| * 9805a88117 s390/cio: rename bitmap_size() -> idset_bitmap_size()
| * 706cc80260 fs/ntfs3: add prefix to bitmap_size() and use BITS_TO_U64()
| * b9bda5f601 vfs: Don't evict inode under the inode lru traversing context
| * a6bba25f15 dm persistent data: fix memory allocation failure
| * 00df2f4868 dm resume: don't return EINVAL when signalled
| * 0237b5517c arm64: ACPI: NUMA: initialize all values of acpi_early_node_map to NUMA_NO_NODE
| * d2c7680250 ACPI: EC: Evaluate _REG outside the EC scope more carefully
| * 03fd525dfe ACPICA: Add a depth argument to acpi_execute_reg_methods()
| * 6861faf423 i2c: tegra: Do not mark ACPI devices as irq safe
| * 36ebafda35 riscv: change XIP's kernel_map.size to be size of the entire kernel
| * 051c0a5581 KVM: s390: fix validity interception issue when gisa is switched off
| * 93a7e28569 s390/dasd: fix error recovery leading to data corruption on ESE devices
| * 31ba13202c ALSA: hda/tas2781: fix wrong calibrated data order
| * 18b3ad2a3c thunderbolt: Mark XDomain as unplugged when router is removed
| * 6b99de301d xhci: Fix Panther point NULL pointer deref at full-speed re-enumeration
| * 9dd9078485 ALSA: usb-audio: Support Yamaha P-125 quirk entry
| * 82d06b8163 ALSA: usb-audio: Add delay quirk for VIVO USB-C-XE710 HEADSET
| * 5cff754692 char: xillybus: Check USB endpoints when probing device
| * 435fc9cae2 char: xillybus: Refine workqueue handling
| * a7ad105b12 char: xillybus: Don't destroy workqueue from work item running on it
| * ac42e0f0eb fuse: Initialize beyond-EOF page contents before setting uptodate
| * 61eb7aae8b selinux: revert our use of vma_is_initial_heap()
| * c2a3b181f0 Revert "usb: typec: tcpm: clear pd_event queue in PORT_RESET"
| * ea13bd807f Revert "misc: fastrpc: Restrict untrusted app to attach to privileged PD"
| * 7adc8a3d5d Revert "ACPI: EC: Evaluate orphan _REG under EC device"
| * 63e80efa63 tty: atmel_serial: use the correct RTS flag.
| * 8eb92cfca6 tty: serial: fsl_lpuart: mark last busy before uart_add_one_port
* | 0475cf04e0 Revert "cgroup: Make operations on the cgroup root_list RCU safe"
* | 4cf149762d Revert "cgroup: Move rcu_head up near the top of cgroup_root"
* | 01ed5e66a4 Revert "fs: Convert to bdev_open_by_dev()"
* | a8167db0c5 Revert "jfs: Convert to bdev_open_by_dev()"
* | a1f8d07b9e Revert "jfs: fix log->bdev_handle null ptr deref in lbmStartIO"
* | d09a1b6c70 Merge 6.6.47 into android15-6.6-lts
|\|
| * 4c1a2d4cd9 Linux 6.6.47
| * 88042e4153 Revert "ata: libata-scsi: Honor the D_SENSE bit for CK_COND=1 and no error"
| * fb6f56244a media: Revert "media: dvb-usb: Fix unexpected infinite loop in dvb_usb_read_remote_control()"
| * 60d90e1582 KVM: arm64: Don't pass a TLBI level hint when zapping table entries
| * 2c770086e0 KVM: arm64: Don't defer TLB invalidation when zapping table entries
| * f3c60ab676 cgroup: Move rcu_head up near the top of cgroup_root
| * 6419341b6b mm/debug_vm_pgtable: drop RANDOM_ORVALUE trick
| * e2bf9ba1d3 Revert "Input: bcm5974 - check endpoint type before starting traffic"
| * 6fb93eeb25 Revert "jfs: fix shift-out-of-bounds in dbJoin"
| * 49df34d2b7 binfmt_flat: Fix corruption when not offsetting data start
| * f95d175a9e ALSA: usb: Fix UBSAN warning in parse_audio_unit()
| * 5472b587cf fs/ntfs3: Do copy_to_user out of run_lock
| * f650148b43 jfs: Fix shift-out-of-bounds in dbDiscardAG
| * 6ea10dbb1e jfs: fix null ptr deref in dtInsertEntry
| * 3db4395332 fou: remove warn in gue_gro_receive on unsupported protocol
| * 263df78166 f2fs: fix to cover read extent cache access with lock
| * ae00e6536a f2fs: fix to do sanity check on F2FS_INLINE_DATA flag in inode during GC
| * bd104cbb9d bpf, net: Use DEV_STAT_INC()
| * 3f6bbe6e07 ext4: sanity check for NULL pointer after ext4_force_shutdown
| * 3a2c70baf6 ext4: convert ext4_da_do_write_end() to take a folio
| * 8a3ac7fb36 wifi: cfg80211: restrict NL80211_ATTR_TXQ_QUANTUM values
| * 5b485efcb6 mm/page_table_check: support userfault wr-protect entries
| * 737fb7853a ext4: do not create EA inode under buffer lock
| * f2a77188a3 ext4: fold quota accounting into ext4_xattr_inode_lookup_create()
| * 4ea65e2095 Bluetooth: RFCOMM: Fix not validating setsockopt user input
| * 0f10613320 nfc: llcp: fix nfc_llcp_setsockopt() unsafe copies
| * ae7f73e64e net: add copy_safe_from_sockptr() helper
| * bae45e9b78 mISDN: fix MISDN_TIME_STAMP handling
| * 107449cfb2 fs: Annotate struct file_handle with __counted_by() and use struct_size()
| * 63f13eb5d6 bpf: Avoid kfree_rcu() under lock in bpf_lpm_trie.
| * ef33f02968 bpf: Replace bpf_lpm_trie_key 0-length array with flexible array
| * 5fbbd952e7 pppoe: Fix memory leak in pppoe_sendmsg()
| * 84c176fbec net: sctp: fix skb leak in sctp_inq_free()
| * 6cee13d8d4 net:rds: Fix possible deadlock in rds_message_put
| * a2f2e5a4c9 quota: Detect loops in quota tree
| * 0252e359af Input: bcm5974 - check endpoint type before starting traffic
| * 2cea502f58 net: tls, add test to capture error on large splice
| * 2fdcf3c4ad erofs: avoid debugging output for (de)compressed data
| * ca9b877a2e reiserfs: fix uninit-value in comp_keys
| * 984ed0567f Squashfs: fix variable overflow triggered by sysbot
| * 2dbaa75748 squashfs: squashfs_read_data need to check if the length is 0
| * 0b24b79410 jfs: fix shift-out-of-bounds in dbJoin
| * 3999d26986 net: don't dump stack on queue timeout
| * e4a4435787 jfs: fix log->bdev_handle null ptr deref in lbmStartIO
| * 9641706cbb jfs: Convert to bdev_open_by_dev()
| * 4365d0d660 fs: Convert to bdev_open_by_dev()
| * e58695f6c5 wifi: mac80211: fix change_address deadlock during unregister
| * be31c9be87 wifi: mac80211: take wiphy lock for MAC addr change
| * 77100f2e84 tcp_metrics: optimize tcp_metrics_flush_all()
| * dd9542ae7c cgroup: Make operations on the cgroup root_list RCU safe
| * bcd5148043 genirq/cpuhotplug: Retry with cpu_online_mask when migration fails
| * 20dbad7525 genirq/cpuhotplug: Skip suspended interrupts when restoring affinity
| * a47b54846a nvme/pci: Add APST quirk for Lenovo N60z laptop
| * 26273f5f4c mm: gup: stop abusing try_grab_folio
| * 9eae190014 nfsd: make svc_stat per-network namespace instead of global
| * 9ae63aab0d nfsd: remove nfsd_stats, make th_cnt a global counter
| * b670a59817 nfsd: make all of the nfsd stats per-network namespace
| * 6f8d6ed342 nfsd: expose /proc/net/sunrpc/nfsd in net namespaces
| * 5b3a1ecf07 nfsd: rename NFSD_NET_* to NFSD_STATS_*
| * 2e8076df20 sunrpc: use the struct net as the svc proc private
| * 791be93cf1 sunrpc: remove ->pg_stats from svc_program
| * 465bb0f1f4 sunrpc: pass in the sv_stats struct through svc_create_pooled
| * 032ed4c630 nfsd: stop setting ->pg_stats for unused stats
| * 1257fe22e1 sunrpc: don't change ->sv_stats if it doesn't exist
| * 9b31d561f4 NFSD: Fix frame size warning in svc_export_parse()
| * 5bc2b8f225 NFSD: Rewrite synopsis of nfsd_percpu_counters_init()
| * 9d91b004df LoongArch: Define __ARCH_WANT_NEW_STAT in unistd.h
| * 7c3e55d8b4 ASoC: topology: Fix route memory corruption
| * bd865c7690 ASoC: topology: Clean up route loading
| * d2a2a4714d exec: Fix ToCToU between perm check and set-uid/gid usage
* d247f814b1 Merge branch 'android15-6.6' into android15-6.6-lts
* 1bd56c7d11 Merge branch 'android15-6.6' into android15-6.6-lts

Change-Id: I004b670039412931c68a39f6ebd72ce8207d730c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@@ -565,7 +565,8 @@ Description: Control Symmetric Multi Threading (SMT)
================ =========================================
If control status is "forceoff" or "notsupported" writes
are rejected.
are rejected. Note that enabling SMT on PowerPC skips
offline cores.
What: /sys/devices/system/cpu/cpuX/power/energy_perf_bias
Date: March 2019

@@ -17,7 +17,7 @@ significant byte.
LPM tries may be created with a maximum prefix length that is a multiple
of 8, in the range from 8 to 2048. The key used for lookup and update
operations is a ``struct bpf_lpm_trie_key``, extended by
operations is a ``struct bpf_lpm_trie_key_u8``, extended by
``max_prefixlen/8`` bytes.
- For IPv4 addresses the data length is 4 bytes

@@ -14,7 +14,7 @@ Page table check performs extra verifications at the time when new pages become
accessible from the userspace by getting their page table entries (PTEs PMDs
etc.) added into the table.
In case of detected corruption, the kernel is crashed. There is a small
In case of most detected corruption, the kernel is crashed. There is a small
performance and memory overhead associated with the page table check. Therefore,
it is disabled by default, but can be optionally enabled on systems where the
extra hardening outweighs the performance costs. Also, because page table check
@@ -22,6 +22,13 @@ is synchronous, it can help with debugging double map memory corruption issues,
by crashing kernel at the time wrong mapping occurs instead of later which is
often the case with memory corruptions bugs.
It can also be used to do page table entry checks over various flags, dump
warnings when illegal combinations of entry flags are detected. Currently,
userfaultfd is the only user of such to sanity check wr-protect bit against
any writable flags. Illegal flag combinations will not directly cause data
corruption in this case immediately, but that will cause read-only data to
be writable, leading to corrupt when the page content is later modified.
Double mapping detection logic
==============================

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 46
SUBLEVEL = 50
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

@@ -381900,6 +381900,15 @@ elf_symbol {
type_id: 0x1784717f
full_name: "drm_rect_rotate"
}
elf_symbol {
id: 0xf836eb53
name: "drm_rect_rotate_inv"
is_defined: true
symbol_type: FUNCTION
crc: 0x6e30ba8e
type_id: 0x1784717f
full_name: "drm_rect_rotate_inv"
}
elf_symbol {
id: 0xa9f64e27
name: "drm_release"
@@ -428556,6 +428565,7 @@ interface {
symbol_id: 0x7f211ebf
symbol_id: 0x62aa5e8d
symbol_id: 0xeeb4f73d
symbol_id: 0xf836eb53
symbol_id: 0xa9f64e27
symbol_id: 0x4ba62142
symbol_id: 0x39b330a8

@@ -258,6 +258,9 @@
drm_mode_vrefresh
drm_of_find_panel_or_bridge
drm_printf
drm_rect_rotate
drm_rect_rotate_inv
drmm_kmalloc
enable_irq
eth_type_trans
_find_first_bit
@@ -1968,6 +1971,7 @@
device_find_child
# required by smem.ko
hwspin_lock_bust
hwspin_lock_free
hwspin_lock_request_specific
__hwspin_lock_timeout

@@ -274,24 +274,24 @@
led@0 {
chan-name = "R";
led-cur = /bits/ 8 <0x20>;
max-cur = /bits/ 8 <0x60>;
led-cur = /bits/ 8 <0x6e>;
max-cur = /bits/ 8 <0xc8>;
reg = <0>;
color = <LED_COLOR_ID_RED>;
};
led@1 {
chan-name = "G";
led-cur = /bits/ 8 <0x20>;
max-cur = /bits/ 8 <0x60>;
led-cur = /bits/ 8 <0xbe>;
max-cur = /bits/ 8 <0xc8>;
reg = <1>;
color = <LED_COLOR_ID_GREEN>;
};
led@2 {
chan-name = "B";
led-cur = /bits/ 8 <0x20>;
max-cur = /bits/ 8 <0x60>;
led-cur = /bits/ 8 <0xbe>;
max-cur = /bits/ 8 <0xc8>;
reg = <2>;
color = <LED_COLOR_ID_BLUE>;
};

@@ -781,7 +781,7 @@
mount-matrix = "-1", "0", "0",
"0", "1", "0",
"0", "0", "1";
"0", "0", "-1";
};
cam1: camera@3e {

@@ -163,13 +163,12 @@
simple-audio-card,cpu {
sound-dai = <&sai3>;
frame-master;
bitclock-master;
};
simple-audio-card,codec {
sound-dai = <&wm8962>;
clocks = <&clk IMX8MP_CLK_IPP_DO_CLKO1>;
frame-master;
bitclock-master;
};
};
};
@@ -381,10 +380,9 @@
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
assigned-clocks = <&clk IMX8MP_CLK_SAI3>,
<&clk IMX8MP_AUDIO_PLL2> ;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-rates = <12288000>, <361267200>;
assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
assigned-clock-rates = <12288000>;
fsl,sai-mclk-direction-output;
status = "okay";
};

@@ -437,7 +437,7 @@
pinctrl-0 = <&pinctrl_usdhc2_hs>, <&pinctrl_usdhc2_gpio>;
pinctrl-1 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
pinctrl-2 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_usdhc2_vmmc>;
bus-width = <4>;
no-sdio;

@@ -19,7 +19,7 @@
linux,cma {
compatible = "shared-dma-pool";
reusable;
alloc-ranges = <0 0x60000000 0 0x40000000>;
alloc-ranges = <0 0x80000000 0 0x40000000>;
size = <0 0x10000000>;
linux,cma-default;
};

@@ -786,6 +786,8 @@
fsl,num-tx-queues = <3>;
fsl,num-rx-queues = <3>;
fsl,stop-mode = <&wakeupmix_gpr 0x0c 1>;
nvmem-cells = <&eth_mac1>;
nvmem-cell-names = "mac-address";
status = "disabled";
};
@@ -807,7 +809,9 @@
<&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
assigned-clock-rates = <100000000>, <250000000>;
intf_mode = <&wakeupmix_gpr 0x28>;
snps,clk-csr = <0>;
snps,clk-csr = <6>;
nvmem-cells = <&eth_mac2>;
nvmem-cell-names = "mac-address";
status = "disabled";
};
@@ -888,6 +892,15 @@
reg = <0x47510000 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
eth_mac1: mac-address@4ec {
reg = <0x4ec 0x6>;
};
eth_mac2: mac-address@4f2 {
reg = <0x4f2 0x6>;
};
};
s4muap: mailbox@47520000 {

@@ -27,7 +27,7 @@
#include <asm/numa.h>
static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
int __init acpi_numa_get_nid(unsigned int cpu)
{

@@ -374,9 +374,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
/* Init percpu seeds for random tags after cpus are set up. */
kasan_init_sw_tags();
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Make sure init_thread_info.ttbr0 always generates translation

@@ -464,6 +464,8 @@ void __init smp_prepare_boot_cpu(void)
init_gic_priority_masking();
kasan_init_hw_tags();
/* Init percpu seeds for random tags after cpus are set up. */
kasan_init_sw_tags();
}
/*

@@ -32,6 +32,7 @@
#include <trace/events/kvm.h>
#include "sys_regs.h"
#include "vgic/vgic.h"
#include "trace.h"
@@ -281,6 +282,11 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
{
bool g1;
if (!kvm_has_gicv3(vcpu->kvm)) {
kvm_inject_undefined(vcpu);
return false;
}
if (!p->is_write)
return read_from_write_only(vcpu, p, r);

@@ -341,4 +341,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
static inline bool kvm_has_gicv3(struct kvm *kvm)
{
return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
irqchip_in_kernel(kvm) &&
kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
}
#endif

@@ -1,11 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _LOONGARCH_DMA_DIRECT_H
#define _LOONGARCH_DMA_DIRECT_H
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#endif /* _LOONGARCH_DMA_DIRECT_H */

@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3

@@ -1725,12 +1725,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
LOONGSON_CONF6_INTIMER);
break;
case PRID_IMP_LOONGSON_64G:
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
decode_cpucfg(c);
change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
LOONGSON_CONF6_INTIMER);
break;
default:
panic("Unknown Loongson Processor ID!");

@@ -255,6 +255,9 @@ void calibrate_delay(void)
void __init setup_arch(char **cmdline_p)
{
/* setup memblock allocator */
setup_memory();
unflatten_and_copy_device_tree();
setup_cpuinfo();
@@ -278,9 +281,6 @@ void __init setup_arch(char **cmdline_p)
}
#endif
/* setup memblock allocator */
setup_memory();
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();

@@ -498,7 +498,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
old_regs = set_irq_regs(regs);
local_irq_disable();
irq_enter();
irq_enter_rcu();
eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
if (!eirr_val)
@@ -533,7 +533,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
#endif /* CONFIG_IRQSTACKS */
out:
irq_exit();
irq_exit_rcu();
set_irq_regs(old_regs);
return;

@@ -112,8 +112,11 @@ static void *simple_realloc(void *ptr, unsigned long size)
return ptr;
new = simple_malloc(size);
memcpy(new, ptr, p->size);
simple_free(ptr);
if (new) {
memcpy(new, ptr, p->size);
simple_free(ptr);
}
return new;
}

@@ -145,6 +145,7 @@ static inline int cpu_to_coregroup_id(int cpu)
#ifdef CONFIG_HOTPLUG_SMT
#include <linux/cpu_smt.h>
#include <linux/cpumask.h>
#include <asm/cputhreads.h>
static inline bool topology_is_primary_thread(unsigned int cpu)
@@ -156,6 +157,18 @@
{
return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
}
#define topology_is_core_online topology_is_core_online
static inline bool topology_is_core_online(unsigned int cpu)
{
int i, first_cpu = cpu_first_thread_sibling(cpu);
for (i = first_cpu; i < first_cpu + threads_per_core; ++i) {
if (cpu_online(i))
return true;
}
return false;
}
#endif
#endif /* __KERNEL__ */


@ -23,6 +23,46 @@ void papr_sysparm_buf_free(struct papr_sysparm_buf *buf)
kfree(buf);
}
static size_t papr_sysparm_buf_get_length(const struct papr_sysparm_buf *buf)
{
return be16_to_cpu(buf->len);
}
static void papr_sysparm_buf_set_length(struct papr_sysparm_buf *buf, size_t length)
{
WARN_ONCE(length > sizeof(buf->val),
"bogus length %zu, clamping to safe value", length);
length = min(sizeof(buf->val), length);
buf->len = cpu_to_be16(length);
}
/*
* For use on buffers returned from ibm,get-system-parameter before
* returning them to callers. Ensures the encoded length of valid data
* cannot overrun buf->val[].
*/
static void papr_sysparm_buf_clamp_length(struct papr_sysparm_buf *buf)
{
papr_sysparm_buf_set_length(buf, papr_sysparm_buf_get_length(buf));
}
/*
* Perform some basic diligence on the system parameter buffer before
* submitting it to RTAS.
*/
static bool papr_sysparm_buf_can_submit(const struct papr_sysparm_buf *buf)
{
/*
* Firmware ought to reject buffer lengths that exceed the
* maximum specified in PAPR, but there's no reason for the
* kernel to allow them either.
*/
if (papr_sysparm_buf_get_length(buf) > sizeof(buf->val))
return false;
return true;
}
/**
* papr_sysparm_get() - Retrieve the value of a PAPR system parameter.
* @param: PAPR system parameter token as described in
@ -63,6 +103,9 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
if (!papr_sysparm_buf_can_submit(buf))
return -EINVAL;
work_area = rtas_work_area_alloc(sizeof(*buf));
memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
@ -77,6 +120,7 @@ int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
case 0:
ret = 0;
memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf));
papr_sysparm_buf_clamp_length(buf);
break;
case -3: /* parameter not implemented */
ret = -EOPNOTSUPP;
@ -115,6 +159,9 @@ int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf)
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
if (!papr_sysparm_buf_can_submit(buf))
return -EINVAL;
work_area = rtas_work_area_alloc(sizeof(*buf));
memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));


@ -236,6 +236,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
cpu, hw_id);
if (!rname)
return -ENOMEM;
if (!request_mem_region(addr, size, rname)) {
pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
cpu, hw_id);


@ -164,6 +164,16 @@
REG_L x31, PT_T6(sp)
.endm
/* Annotate a function as being unsuitable for kprobes. */
#ifdef CONFIG_KPROBES
#define ASM_NOKPROBE(name) \
.pushsection "_kprobe_blacklist", "aw"; \
RISCV_PTR name; \
.popsection
#else
#define ASM_NOKPROBE(name)
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_ASM_H */


@ -110,6 +110,7 @@ _save_context:
1:
tail do_trap_unknown
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
/*
* The ret_from_exception must be called with interrupt disabled. Here is the
@ -180,6 +181,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
@ -215,6 +217,7 @@ SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
move a0, sp
tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif
SYM_CODE_START(ret_from_fork)


@ -311,6 +311,7 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
regs->epc += 4;
regs->orig_a0 = regs->a0;
regs->a0 = -ENOSYS;
riscv_v_vstate_discard(regs);
@ -318,8 +319,6 @@ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
if (syscall >= 0 && syscall < NR_syscalls)
syscall_handler(regs, syscall);
else if (syscall != -1)
regs->a0 = -ENOSYS;
syscall_exit_to_user_mode(regs);
} else {


@ -912,7 +912,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir,
PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
end_va = kernel_map.virt_addr + kernel_map.size;
for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
@ -1081,7 +1081,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
phys_ram_base = CONFIG_PHYS_RAM_BASE;
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else


@ -442,7 +442,10 @@ static inline int share(unsigned long addr, u16 cmd)
if (!uv_call(0, (u64)&uvcb))
return 0;
return -EINVAL;
pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
uvcb.header.rc, uvcb.header.rrc);
panic("System security cannot be guaranteed unless the system panics now.\n");
}
/*


@ -258,15 +258,9 @@ static inline void save_vector_registers(void)
#endif
}
static inline void setup_control_registers(void)
static inline void setup_low_address_protection(void)
{
unsigned long reg;
__ctl_store(reg, 0, 0);
reg |= CR0_LOW_ADDRESS_PROTECTION;
reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
reg |= CR0_EXTERNAL_CALL_SUBMASK;
__ctl_load(reg, 0, 0);
__ctl_set_bit(0, 28);
}
static inline void setup_access_registers(void)
@ -314,7 +308,7 @@ void __init startup_init(void)
save_vector_registers();
setup_topology();
sclp_early_detect();
setup_control_registers();
setup_low_address_protection();
setup_access_registers();
lockdep_on();
}


@ -1013,12 +1013,12 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */
ctl_set_bit(0, 14);
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
ctl_set_bit(0, 13);
}
void __init smp_prepare_boot_cpu(void)


@ -249,7 +249,12 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
u32 gd = virt_to_phys(kvm->arch.gisa_int.origin);
u32 gd;
if (!kvm->arch.gisa_int.origin)
return 0;
gd = virt_to_phys(kvm->arch.gisa_int.origin);
if (gd && sclp.has_gisaf)
gd |= GISA_FORMAT1;


@ -387,23 +387,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
bool wp = pte_flags(pte) & _PAGE_UFFD_WP;
#ifdef CONFIG_DEBUG_VM
/*
* Having write bit for wr-protect-marked present ptes is fatal,
* because it means the uffd-wp bit will be ignored and write will
* just go through.
*
* Use any chance of pgtable walking to verify this (e.g., when
* page swapped out or being migrated for all purposes). It means
* something is already wrong. Tell the admin even before the
* process crashes. We also nail it with wrong pgtable setup.
*/
WARN_ON_ONCE(wp && pte_write(pte));
#endif
return wp;
return pte_flags(pte) & _PAGE_UFFD_WP;
}
static inline pte_t pte_mkuffd_wp(pte_t pte)


@ -1031,7 +1031,10 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
return randomize_page(mm->brk, 0x02000000);
if (mmap_is_ia32())
return randomize_page(mm->brk, SZ_32M);
return randomize_page(mm->brk, SZ_1G);
}
/*


@ -396,8 +396,6 @@ void blk_integrity_unregister(struct gendisk *disk)
if (!bi->profile)
return;
/* ensure all bios are off the integrity workqueue */
blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
memset(bi, 0, sizeof(*bi));
}


@ -38,6 +38,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
unsigned int users;
unsigned long flags;
struct blk_mq_tags *tags = hctx->tags;
/*
@ -56,11 +57,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
return;
}
spin_lock_irq(&tags->lock);
spin_lock_irqsave(&tags->lock, flags);
users = tags->active_queues + 1;
WRITE_ONCE(tags->active_queues, users);
blk_mq_update_wake_batch(tags, users);
spin_unlock_irq(&tags->lock);
spin_unlock_irqrestore(&tags->lock, flags);
}
/*


@ -1645,19 +1645,19 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hl_data64b_fops);
debugfs_create_file("set_power_state",
0200,
0644,
root,
dev_entry,
&hl_power_fops);
debugfs_create_file("device",
0200,
0644,
root,
dev_entry,
&hl_device_fops);
debugfs_create_file("clk_gate",
0200,
0644,
root,
dev_entry,
&hl_clk_gate_fops);
@ -1669,13 +1669,13 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hl_stop_on_err_fops);
debugfs_create_file("dump_security_violations",
0644,
0400,
root,
dev_entry,
&hl_security_violations_fops);
debugfs_create_file("dump_razwi_events",
0644,
0400,
root,
dev_entry,
&hl_razwi_check_fops);
@ -1708,7 +1708,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
0644,
root,
dev_entry,
&hl_state_dump_fops);
@ -1726,7 +1726,7 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
0644,
root,
entry,
&hl_debugfs_fops);


@ -271,6 +271,9 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
free_node->cq_cb = pend->ts_reg_info.cq_cb;
list_add(&free_node->free_objects_node, *free_list);
/* Mark TS record as free */
pend->ts_reg_info.in_use = false;
return 0;
}


@ -1878,16 +1878,16 @@ err_dma_buf_put:
static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
{
if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
if (!PAGE_ALIGNED(device_addr)) {
dev_dbg(hdev->dev,
"exported device memory address 0x%llx should be aligned to 0x%lx\n",
"exported device memory address 0x%llx should be aligned to PAGE_SIZE 0x%lx\n",
device_addr, PAGE_SIZE);
return -EINVAL;
}
if (size < PAGE_SIZE) {
if (!size || !PAGE_ALIGNED(size)) {
dev_dbg(hdev->dev,
"exported device memory size %llu should be equal to or greater than %lu\n",
"exported device memory size %llu should be a multiple of PAGE_SIZE %lu\n",
size, PAGE_SIZE);
return -EINVAL;
}
@ -1938,6 +1938,13 @@ static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 s
if (rc)
return rc;
if (!PAGE_ALIGNED(offset)) {
dev_dbg(hdev->dev,
"exported device memory offset %llu should be a multiple of PAGE_SIZE %lu\n",
offset, PAGE_SIZE);
return -EINVAL;
}
if ((offset + size) > phys_pg_pack->total_size) {
dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
offset, size, phys_pg_pack->total_size);


@ -1601,6 +1601,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
mmDCORE0_TPC0_CFG_KERNEL_SRF_30,
mmDCORE0_TPC0_CFG_KERNEL_SRF_31,
mmDCORE0_TPC0_CFG_TPC_SB_L0CD,
mmDCORE0_TPC0_CFG_TPC_COUNT,
mmDCORE0_TPC0_CFG_TPC_ID,
mmDCORE0_TPC0_CFG_QM_KERNEL_ID_INC,
mmDCORE0_TPC0_CFG_QM_TID_BASE_SIZE_HIGH_DIM_0,


@ -188,13 +188,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
acpi_adr_space_type space_id, u32 function);
void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);


@ -20,6 +20,10 @@ extern u8 acpi_gbl_default_address_spaces[];
/* Local prototypes */
static void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id);
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@ -61,6 +65,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_gbl_default_address_spaces
[i])) {
acpi_ev_execute_reg_methods(acpi_gbl_root_node,
ACPI_UINT32_MAX,
acpi_gbl_default_address_spaces
[i], ACPI_REG_CONNECT);
}
@ -668,6 +673,7 @@ cleanup1:
* FUNCTION: acpi_ev_execute_reg_methods
*
* PARAMETERS: node - Namespace node for the device
* max_depth - Depth to which search for _REG
* space_id - The address space ID
* function - Passed to _REG: On (1) or Off (0)
*
@ -679,7 +685,7 @@ cleanup1:
******************************************************************************/
void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, u32 max_depth,
acpi_adr_space_type space_id, u32 function)
{
struct acpi_reg_walk_info info;
@ -713,7 +719,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
* regions and _REG methods. (i.e. handlers must be installed for all
* regions of this Space ID before we can run any _REG methods)
*/
(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
(void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, max_depth,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL,
&info, NULL);
@ -814,7 +820,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
*
******************************************************************************/
void
static void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id)
{


@ -85,7 +85,8 @@ acpi_install_address_space_handler_internal(acpi_handle device,
/* Run all _REG methods for this address space */
if (run_reg) {
acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
acpi_ev_execute_reg_methods(node, ACPI_UINT32_MAX, space_id,
ACPI_REG_CONNECT);
}
unlock_and_exit:
@ -263,6 +264,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
* FUNCTION: acpi_execute_reg_methods
*
* PARAMETERS: device - Handle for the device
* max_depth - Depth to which search for _REG
* space_id - The address space ID
*
* RETURN: Status
@ -271,7 +273,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
*
******************************************************************************/
acpi_status
acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
acpi_execute_reg_methods(acpi_handle device, u32 max_depth,
acpi_adr_space_type space_id)
{
struct acpi_namespace_node *node;
acpi_status status;
@ -296,7 +299,8 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
/* Run all _REG methods for this address space */
acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
acpi_ev_execute_reg_methods(node, max_depth, space_id,
ACPI_REG_CONNECT);
} else {
status = AE_BAD_PARAMETER;
}
@ -306,57 +310,3 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
}
ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
/*******************************************************************************
*
* FUNCTION: acpi_execute_orphan_reg_method
*
* PARAMETERS: device - Handle for the device
* space_id - The address space ID
*
* RETURN: Status
*
* DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
* device. This is a _REG method that has no corresponding region
* within the device's scope.
*
******************************************************************************/
acpi_status
acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
{
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
/* Parameter validation */
if (!device) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Convert and validate the device handle */
node = acpi_ns_validate_handle(device);
if (node) {
/*
* If an "orphan" _REG method is present in the device's scope
* for the given address space ID, run it.
*/
acpi_ev_execute_orphan_reg_method(node, space_id);
} else {
status = AE_BAD_PARAMETER;
}
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)


@ -1487,12 +1487,13 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
bool call_reg)
{
acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
acpi_status status;
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
acpi_ec_enter_noirq(ec);
status = acpi_install_address_space_handler_no_reg(scope_handle,
ACPI_ADR_SPACE_EC,
@ -1506,10 +1507,7 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
}
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
if (scope_handle != ec->handle)
acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC);
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
@ -1724,6 +1722,12 @@ static void acpi_ec_remove(struct acpi_device *device)
}
}
void acpi_ec_register_opregions(struct acpi_device *adev)
{
if (first_ec && first_ec->handle != adev->handle)
acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC);
}
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{


@ -204,6 +204,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
void acpi_ec_register_opregions(struct acpi_device *adev);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);


@ -2198,6 +2198,8 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
if (device->handler)
goto ok;
acpi_ec_register_opregions(device);
if (!device->flags.initialized) {
device->flags.power_manageable =
device->power.states[ACPI_STATE_D0].flags.valid;


@ -941,8 +941,19 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
&sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
/* ATA PASS-THROUGH INFORMATION AVAILABLE */
ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D);
/*
* ATA PASS-THROUGH INFORMATION AVAILABLE
*
* Note: we are supposed to call ata_scsi_set_sense(), which
* respects the D_SENSE bit, instead of unconditionally
* generating the sense data in descriptor format. However,
* because hdparm, hddtemp, and udisks incorrectly assume sense
* data in descriptor format, without even looking at the
* RESPONSE CODE field in the returned sense data (to see which
* format the returned sense data is in), we are stuck with
* being bug compatible with older kernels.
*/
scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}
}


@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
rpp->len += skb->len;
if (stat & SAR_RSQE_EPDU) {
unsigned int len, truesize;
unsigned char *l1l2;
unsigned int len;
l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
ATM_SKB(skb)->vcc = vcc;
__net_timestamp(skb);
truesize = skb->truesize;
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
if (skb->truesize > SAR_FB_SIZE_3)
if (truesize > SAR_FB_SIZE_3)
add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
else if (skb->truesize > SAR_FB_SIZE_2)
else if (truesize > SAR_FB_SIZE_2)
add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
else if (skb->truesize > SAR_FB_SIZE_1)
else if (truesize > SAR_FB_SIZE_1)
add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
else
add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);


@ -122,8 +122,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
return ERR_PTR(-ENOMEM);
max_msg_size = spi_max_message_size(spi);
reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ config->pad_bits / BITS_PER_BYTE;
reg_reserve_size = (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
if (max_size + reg_reserve_size > max_msg_size)
max_size -= reg_reserve_size;


@ -29,6 +29,7 @@
#define BTNXPUART_CHECK_BOOT_SIGNATURE 3
#define BTNXPUART_SERDEV_OPEN 4
#define BTNXPUART_IR_IN_PROGRESS 5
#define BTNXPUART_FW_DOWNLOAD_ABORT 6
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
@ -126,6 +127,7 @@ struct ps_data {
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
struct mutex ps_lock;
};
struct wakeup_cmd_payload {
@ -158,6 +160,7 @@ struct btnxpuart_dev {
u8 fw_name[MAX_FW_FILE_NAME_LEN];
u32 fw_dnld_v1_offset;
u32 fw_v1_sent_bytes;
u32 fw_dnld_v3_offset;
u32 fw_v3_offset_correction;
u32 fw_v1_expected_len;
u32 boot_reg_offset;
@ -333,6 +336,9 @@ static void ps_start_timer(struct btnxpuart_dev *nxpdev)
if (psdata->cur_psmode == PS_MODE_ENABLE)
mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval));
if (psdata->ps_state == PS_STATE_AWAKE && psdata->ps_cmd == PS_CMD_ENTER_PS)
cancel_work_sync(&psdata->work);
}
static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
@ -353,6 +359,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
return;
mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
@ -366,12 +373,15 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
status = serdev_device_break_ctl(nxpdev->serdev, 0);
else
status = serdev_device_break_ctl(nxpdev->serdev, -1);
msleep(20); /* Allow chip to detect UART-break and enter sleep */
bt_dev_dbg(hdev, "Set UART break: %s, status=%d",
str_on_off(ps_state == PS_STATE_SLEEP), status);
break;
}
if (!status)
psdata->ps_state = ps_state;
mutex_unlock(&psdata->ps_lock);
if (ps_state == PS_STATE_AWAKE)
btnxpuart_tx_wakeup(nxpdev);
}
@ -407,17 +417,42 @@ static void ps_setup(struct hci_dev *hdev)
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
}
static void ps_wakeup(struct btnxpuart_dev *nxpdev)
static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
{
struct ps_data *psdata = &nxpdev->psdata;
u8 ps_state;
if (psdata->ps_state != PS_STATE_AWAKE) {
mutex_lock(&psdata->ps_lock);
ps_state = psdata->ps_state;
mutex_unlock(&psdata->ps_lock);
if (ps_state != PS_STATE_AWAKE) {
psdata->ps_cmd = PS_CMD_EXIT_PS;
schedule_work(&psdata->work);
return true;
}
return false;
}
static void ps_cleanup(struct btnxpuart_dev *nxpdev)
{
struct ps_data *psdata = &nxpdev->psdata;
u8 ps_state;
mutex_lock(&psdata->ps_lock);
ps_state = psdata->ps_state;
mutex_unlock(&psdata->ps_lock);
if (ps_state != PS_STATE_AWAKE)
ps_control(psdata->hdev, PS_STATE_AWAKE);
ps_cancel_timer(nxpdev);
cancel_work_sync(&psdata->work);
mutex_destroy(&psdata->ps_lock);
}
static int send_ps_cmd(struct hci_dev *hdev, void *data)
@ -550,6 +585,7 @@ static int nxp_download_firmware(struct hci_dev *hdev)
nxpdev->fw_v1_sent_bytes = 0;
nxpdev->fw_v1_expected_len = HDR_LEN;
nxpdev->boot_reg_offset = 0;
nxpdev->fw_dnld_v3_offset = 0;
nxpdev->fw_v3_offset_correction = 0;
nxpdev->baudrate_changed = false;
nxpdev->timeout_changed = false;
@ -564,14 +600,23 @@ static int nxp_download_firmware(struct hci_dev *hdev)
!test_bit(BTNXPUART_FW_DOWNLOADING,
&nxpdev->tx_state),
msecs_to_jiffies(60000));
release_firmware(nxpdev->fw);
memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
if (err == 0) {
bt_dev_err(hdev, "FW Download Timeout.");
bt_dev_err(hdev, "FW Download Timeout. offset: %d",
nxpdev->fw_dnld_v1_offset ?
nxpdev->fw_dnld_v1_offset :
nxpdev->fw_dnld_v3_offset);
return -ETIMEDOUT;
}
if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) {
bt_dev_err(hdev, "FW Download Aborted");
return -EINTR;
}
serdev_device_set_flow_control(nxpdev->serdev, true);
release_firmware(nxpdev->fw);
memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
/* Allow the downloaded FW to initialize */
msleep(1200);
@ -982,8 +1027,9 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
goto free_skb;
}
serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset -
nxpdev->fw_v3_offset_correction, len);
nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction;
serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data +
nxpdev->fw_dnld_v3_offset, len);
free_skb:
kfree_skb(skb);
@ -1215,7 +1261,6 @@ static struct sk_buff *nxp_dequeue(void *data)
{
struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data;
ps_wakeup(nxpdev);
ps_start_timer(nxpdev);
return skb_dequeue(&nxpdev->txq);
}
@ -1230,6 +1275,9 @@ static void btnxpuart_tx_work(struct work_struct *work)
struct sk_buff *skb;
int len;
if (ps_wakeup(nxpdev))
return;
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
hdev->stat.byte_tx += len;
@ -1276,7 +1324,6 @@ static int btnxpuart_close(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
skb_queue_purge(&nxpdev->txq);
kfree_skb(nxpdev->rx_skb);
@ -1412,16 +1459,22 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
struct hci_dev *hdev = nxpdev->hdev;
/* Restore FW baudrate to fw_init_baudrate if changed.
* This will ensure FW baudrate is in sync with
* driver baudrate in case this driver is re-inserted.
*/
if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
if (is_fw_downloading(nxpdev)) {
set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
} else {
/* Restore FW baudrate to fw_init_baudrate if changed.
* This will ensure FW baudrate is in sync with
* driver baudrate in case this driver is re-inserted.
*/
if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
}
}
ps_cancel_timer(nxpdev);
ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}


@ -50,6 +50,7 @@ MODULE_LICENSE("GPL v2");
static const char xillyname[] = "xillyusb";
static unsigned int fifo_buf_order;
static struct workqueue_struct *wakeup_wq;
#define USB_VENDOR_ID_XILINX 0x03fd
#define USB_VENDOR_ID_ALTERA 0x09fb
@ -569,10 +570,6 @@ static void cleanup_dev(struct kref *kref)
* errors if executed. The mechanism relies on that xdev->error is assigned
* a non-zero value by report_io_error() prior to queueing wakeup_all(),
* which prevents bulk_in_work() from calling process_bulk_in().
*
* The fact that wakeup_all() and bulk_in_work() are queued on the same
* workqueue makes their concurrent execution very unlikely, however the
* kernel's API doesn't seem to ensure this strictly.
*/
static void wakeup_all(struct work_struct *work)
@ -627,7 +624,7 @@ static void report_io_error(struct xillyusb_dev *xdev,
if (do_once) {
kref_get(&xdev->kref); /* xdev is used by work item */
queue_work(xdev->workq, &xdev->wakeup_workitem);
queue_work(wakeup_wq, &xdev->wakeup_workitem);
}
}
@ -1906,6 +1903,13 @@ static const struct file_operations xillyusb_fops = {
static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev)
{
struct usb_device *udev = xdev->udev;
/* Verify that device has the two fundamental bulk in/out endpoints */
if (usb_pipe_type_check(udev, usb_sndbulkpipe(udev, MSG_EP_NUM)) ||
usb_pipe_type_check(udev, usb_rcvbulkpipe(udev, IN_EP_NUM)))
return -ENODEV;
xdev->msg_ep = endpoint_alloc(xdev, MSG_EP_NUM | USB_DIR_OUT,
bulk_out_work, 1, 2);
if (!xdev->msg_ep)
@ -1935,14 +1939,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
__le16 *chandesc,
int num_channels)
{
struct xillyusb_channel *chan;
struct usb_device *udev = xdev->udev;
struct xillyusb_channel *chan, *new_channels;
int i;
chan = kcalloc(num_channels, sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
xdev->channels = chan;
new_channels = chan;
for (i = 0; i < num_channels; i++, chan++) {
unsigned int in_desc = le16_to_cpu(*chandesc++);
@ -1971,6 +1976,15 @@ static int setup_channels(struct xillyusb_dev *xdev,
*/
if ((out_desc & 0x80) && i < 14) { /* Entry is valid */
if (usb_pipe_type_check(udev,
usb_sndbulkpipe(udev, i + 2))) {
dev_err(xdev->dev,
"Missing BULK OUT endpoint %d\n",
i + 2);
kfree(new_channels);
return -ENODEV;
}
chan->writable = 1;
chan->out_synchronous = !!(out_desc & 0x40);
chan->out_seekable = !!(out_desc & 0x20);
@ -1980,6 +1994,7 @@ static int setup_channels(struct xillyusb_dev *xdev,
}
}
xdev->channels = new_channels;
return 0;
}
@ -2096,9 +2111,11 @@ static int xillyusb_discovery(struct usb_interface *interface)
* just after responding with the IDT, there is no reason for any
* work item to be running now. To be sure that xdev->channels
* is updated on anything that might run in parallel, flush the
* workqueue, which rarely does anything.
* device's workqueue and the wakeup work item. This rarely
* does anything.
*/
flush_workqueue(xdev->workq);
flush_work(&xdev->wakeup_workitem);
xdev->num_channels = num_channels;
@ -2258,6 +2275,10 @@ static int __init xillyusb_init(void)
{
int rc = 0;
wakeup_wq = alloc_workqueue(xillyname, 0, 0);
if (!wakeup_wq)
return -ENOMEM;
if (LOG2_INITIAL_FIFO_BUF_SIZE > PAGE_SHIFT)
fifo_buf_order = LOG2_INITIAL_FIFO_BUF_SIZE - PAGE_SHIFT;
else
@ -2265,12 +2286,17 @@ static int __init xillyusb_init(void)
rc = usb_register(&xillyusb_driver);
if (rc)
destroy_workqueue(wakeup_wq);
return rc;
}
static void __exit xillyusb_exit(void)
{
usb_deregister(&xillyusb_driver);
destroy_workqueue(wakeup_wq);
}
module_init(xillyusb_init);


@ -329,12 +329,12 @@ struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
if (!ctx)
return ERR_PTR(-ENOMEM);
for (i = 0; i < nr_plls; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->node = np;
ctx->reg_base = base;
ctx->clk_data.num = nr_plls;
for (i = 0; i < nr_plls; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
return ctx;
}


@ -290,18 +290,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
switch (event) {
case PRE_RATE_CHANGE:
{
int psv;
unsigned long psv;
psv = DIV_ROUND_CLOSEST(ndata->new_rate,
gt_target_rate);
if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
if (!psv ||
abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
return NOTIFY_BAD;
psv--;
/* prescaler within legal range? */
if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
if (psv > GT_CONTROL_PRESCALER_MAX)
return NOTIFY_BAD;
/*


@ -11,6 +11,7 @@
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bottom_half.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@ -1665,8 +1666,11 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
it_mask &= ~IMSCR_OUT;
stm32_cryp_write(cryp, cryp->caps->imsc, it_mask);
if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) {
local_bh_disable();
stm32_cryp_finish_req(cryp, 0);
local_bh_enable();
}
return IRQ_HANDLED;
}


@ -233,7 +233,7 @@ static void msgdma_free_descriptor(struct msgdma_device *mdev,
struct msgdma_sw_desc *child, *next;
mdev->desc_free_cnt++;
list_add_tail(&desc->node, &mdev->free_list);
list_move_tail(&desc->node, &mdev->free_list);
list_for_each_entry_safe(child, next, &desc->tx_list, node) {
mdev->desc_free_cnt++;
list_move_tail(&child->node, &mdev->free_list);
@ -583,17 +583,16 @@ static void msgdma_issue_pending(struct dma_chan *chan)
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
struct msgdma_sw_desc *desc, *next;
unsigned long irqflags;
list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
struct dmaengine_desc_callback cb;
list_del(&desc->node);
dmaengine_desc_get_callback(&desc->async_tx, &cb);
if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock(&mdev->lock);
spin_unlock_irqrestore(&mdev->lock, irqflags);
dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock(&mdev->lock);
spin_lock_irqsave(&mdev->lock, irqflags);
}
/* Run any dependencies, then free the descriptor */


@ -17,8 +17,8 @@ enum dw_hdma_control {
DW_HDMA_V0_CB = BIT(0),
DW_HDMA_V0_TCB = BIT(1),
DW_HDMA_V0_LLP = BIT(2),
DW_HDMA_V0_LIE = BIT(3),
DW_HDMA_V0_RIE = BIT(4),
DW_HDMA_V0_LWIE = BIT(3),
DW_HDMA_V0_RWIE = BIT(4),
DW_HDMA_V0_CCS = BIT(8),
DW_HDMA_V0_LLE = BIT(9),
};
@ -195,25 +195,14 @@ static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_chan *chan = chunk->chan;
u32 control = 0, i = 0;
int j;
if (chunk->cb)
control = DW_HDMA_V0_CB;
j = chunk->bursts_alloc;
list_for_each_entry(child, &chunk->burst->list, list) {
j--;
if (!j) {
control |= DW_HDMA_V0_LIE;
if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
control |= DW_HDMA_V0_RIE;
}
list_for_each_entry(child, &chunk->burst->list, list)
dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
child->sar, child->dar);
}
control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
if (!chunk->cb)
@ -247,10 +236,11 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
/* Interrupt enable&unmask - done, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
/* Interrupt unmask - stop, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
/* Interrupt enable - stop, abort */
tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);


@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
@ -621,12 +622,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dw_desc *prev;
struct dw_desc *first;
u32 ctllo, ctlhi;
u8 m_master = dwc->dws.m_master;
u8 lms = DWC_LLP_LMS(m_master);
u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
dma_addr_t reg;
unsigned int reg_width;
unsigned int mem_width;
unsigned int data_width = dw->pdata->data_width[m_master];
unsigned int i;
struct scatterlist *sg;
size_t total_len = 0;
@ -660,7 +659,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
mem_width = __ffs(data_width | mem | len);
mem_width = __ffs(sconfig->src_addr_width | mem | len);
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
@ -720,7 +719,7 @@ slave_sg_fromdev_fill_desc:
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
lli_write(desc, ctlhi, ctlhi);
mem_width = __ffs(data_width | mem);
mem_width = __ffs(sconfig->dst_addr_width | mem);
lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
desc->len = dlen;
@ -780,17 +779,93 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
static int dwc_verify_p_buswidth(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
u32 reg_width, max_width;
if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
reg_width = dwc->dma_sconfig.dst_addr_width;
else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
reg_width = dwc->dma_sconfig.src_addr_width;
else /* DMA_MEM_TO_MEM */
return 0;
max_width = dw->pdata->data_width[dwc->dws.p_master];
/* Fall-back to 1-byte transfer width if undefined */
if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
else if (!is_power_of_2(reg_width) || reg_width > max_width)
return -EINVAL;
else /* bus width is valid */
return 0;
/* Update undefined addr width value */
if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
dwc->dma_sconfig.dst_addr_width = reg_width;
else /* DMA_DEV_TO_MEM */
dwc->dma_sconfig.src_addr_width = reg_width;
return 0;
}
static int dwc_verify_m_buswidth(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
u32 reg_width, reg_burst, mem_width;
mem_width = dw->pdata->data_width[dwc->dws.m_master];
/*
* It's possible to have a data portion locked in the DMA FIFO in case
* of the channel suspension. Subsequent channel disabling will cause
* that data silent loss. In order to prevent that maintain the src and
* dst transfer widths coherency by means of the relation:
* (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
* Look for the details in the commit message that brings this change.
*
* Note the DMA configs utilized in the calculations below must have
* been verified to have correct values by this method call.
*/
if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
reg_width = dwc->dma_sconfig.dst_addr_width;
if (mem_width < reg_width)
return -EINVAL;
dwc->dma_sconfig.src_addr_width = mem_width;
} else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
reg_width = dwc->dma_sconfig.src_addr_width;
reg_burst = rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
}
return 0;
}
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
int ret;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
dwc->dma_sconfig.src_maxburst =
clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
dwc->dma_sconfig.dst_maxburst =
clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
ret = dwc_verify_p_buswidth(chan);
if (ret)
return ret;
ret = dwc_verify_m_buswidth(chan);
if (ret)
return ret;
dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);


@ -659,6 +659,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
memset(&res, 0, sizeof(res));
res.mce = mce;
res.addr = mce->addr & MCI_ADDR_PHYSADDR;
if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) {
pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
return NOTIFY_DONE;
}
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {


@ -522,7 +522,7 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
cs_dsp_debugfs_clear(dsp);
debugfs_remove_recursive(dsp->debugfs_root);
dsp->debugfs_root = NULL;
dsp->debugfs_root = ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
#else
@ -2343,6 +2343,11 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)
mutex_init(&dsp->pwr_lock);
#ifdef CONFIG_DEBUG_FS
/* Ensure this is invalid if client never provides a debugfs root */
dsp->debugfs_root = ERR_PTR(-ENODEV);
#endif
return 0;
}


@ -71,7 +71,7 @@ int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
struct arm_smccc_res get_wq_res;
struct arm_smccc_args get_wq_ctx = {0};
get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));


@ -39,6 +39,8 @@
#define MLXBF_GPIO_CAUSE_OR_EVTEN0 0x14
#define MLXBF_GPIO_CAUSE_OR_CLRCAUSE 0x18
#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)
struct mlxbf3_gpio_context {
struct gpio_chip gc;
@ -82,6 +84,8 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
val &= ~BIT(offset);
writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
gpiochip_disable_irq(gc, offset);
@ -253,6 +257,15 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
return 0;
}
static void mlxbf3_gpio_shutdown(struct platform_device *pdev)
{
struct mlxbf3_gpio_context *gs = platform_get_drvdata(pdev);
/* Disable and clear all interrupts */
writel(0, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
writel(MLXBF_GPIO_CLR_ALL_INTS, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
}
static const struct acpi_device_id mlxbf3_gpio_acpi_match[] = {
{ "MLNXBF33", 0 },
{}
@ -265,6 +278,7 @@ static struct platform_driver mlxbf3_gpio_driver = {
.acpi_match_table = mlxbf3_gpio_acpi_match,
},
.probe = mlxbf3_gpio_probe,
.shutdown = mlxbf3_gpio_shutdown,
};
module_platform_driver(mlxbf3_gpio_driver);


@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
@ -774,15 +775,15 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
struct gpio_desc *desc;
struct gpio_chip *chip = gdev->chip;
if (!gdev->mockdev)
return;
scoped_guard(mutex, &sysfs_lock) {
if (!gdev->mockdev)
return;
device_unregister(gdev->mockdev);
device_unregister(gdev->mockdev);
/* prevent further gpiod exports */
mutex_lock(&sysfs_lock);
gdev->mockdev = NULL;
mutex_unlock(&sysfs_lock);
/* prevent further gpiod exports */
gdev->mockdev = NULL;
}
/* unregister gpiod class devices owned by sysfs */
for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {


@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
res.clock = clock;
return res;
}


@ -303,6 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,


@ -407,6 +407,10 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
"Called with userptr BO"))
return -EINVAL;
/* bo has been pinned, not need validate it */
if (bo->tbo.pin_count)
return 0;
amdgpu_bo_placement_from_domain(bo, domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@ -733,7 +737,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
enum dma_data_direction dir;
if (unlikely(!ttm->sg)) {
pr_err("SG Table of BO is UNEXPECTEDLY NULL");
pr_debug("SG Table of BO is NULL");
return;
}
@ -1202,8 +1206,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
amdgpu_sync_fence(sync, bo_va->last_pt_update);
kfd_mem_dmaunmap_attachment(mem, entry);
}
static int update_gpuvm_pte(struct kgd_mem *mem,
@ -1258,6 +1260,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
update_gpuvm_pte_failed:
unmap_bo_from_gpuvm(mem, entry, sync);
kfd_mem_dmaunmap_attachment(mem, entry);
return ret;
}
@ -1862,8 +1865,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
mem->va + bo_size * (1 + mem->aql_queue));
/* Remove from VM internal data structures */
list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
kfd_mem_dmaunmap_attachment(mem, entry);
kfd_mem_detach(entry);
}
ret = unreserve_bo_and_vms(&ctx, false, false);
@ -2037,6 +2042,37 @@ out:
return ret;
}
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
struct kfd_mem_attachment *entry;
struct amdgpu_vm *vm;
int ret;
vm = drm_priv_to_vm(drm_priv);
mutex_lock(&mem->lock);
ret = amdgpu_bo_reserve(mem->bo, true);
if (ret)
goto out;
list_for_each_entry(entry, &mem->attachments, list) {
if (entry->bo_va->base.vm != vm)
continue;
if (entry->bo_va->base.bo->tbo.ttm &&
!entry->bo_va->base.bo->tbo.ttm->sg)
continue;
kfd_mem_dmaunmap_attachment(mem, entry);
}
amdgpu_bo_unreserve(mem->bo);
out:
mutex_unlock(&mem->lock);
return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
@ -2599,7 +2635,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
/* keep mem without hmm range at userptr_inval_list */
if (!mem->range)
continue;
continue;
/* Only check mem with hmm range associated */
valid = amdgpu_ttm_tt_get_user_pages_done(
@ -2816,9 +2852,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (!attachment->is_mapped)
continue;
if (attachment->bo_va->base.bo->tbo.pin_count)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {


@ -1476,6 +1476,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
(u32)le32_to_cpu(*((u32 *)reg_data + j));
j++;
} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
if (i == 0)
continue;
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
}


@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
struct amdgpu_firmware_info *ucode;
id = fw_type_convert(cgs_device, type);
if (id >= AMDGPU_UCODE_ID_MAXIMUM)
return -EINVAL;
ucode = &adev->firmware.ucode[id];
if (ucode->fw == NULL)
return -EINVAL;


@ -1057,6 +1057,9 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
r = amdgpu_ring_parse_cs(ring, p, job, ib);
if (r)
return r;
if (ib->sa_bo)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
} else {
ib->ptr = (uint32_t *)kptr;
r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);


@ -684,16 +684,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_QUERY_STATE2:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_GET_STABLE_PSTATE:


@ -4480,7 +4480,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
shadow = vmbo->shadow;
/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
if (!shadow->tbo.resource ||
shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
continue;
@ -5235,7 +5236,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* to put adev in the 1st position.
*/
INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
if (gpu_reset_for_dev_remove && adev->shutdown)


@ -1550,7 +1550,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
break;
case 2:
mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
break;
default:
dev_err(adev->dev,


@ -179,7 +179,7 @@ static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
* Returns the number of bytes read/written; -errno on error.
*/
static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
u8 *eeprom_buf, u16 buf_size, bool read)
u8 *eeprom_buf, u32 buf_size, bool read)
{
const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
u16 limit;
@ -225,7 +225,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes)
u32 bytes)
{
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
true);
@ -233,7 +233,7 @@ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes)
u32 bytes)
{
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
false);


@ -28,10 +28,10 @@
int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes);
u32 bytes);
int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes);
u32 bytes);
#endif


@ -34,6 +34,7 @@
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
#include "amdgpu_reset.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
@ -400,7 +401,10 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
return;
mb();
amdgpu_device_flush_hdp(adev, NULL);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_device_flush_hdp(adev, NULL);
up_read(&adev->reset_domain->sem);
}
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}


@ -1336,6 +1336,9 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
uint8_t dst_num_links = node_info.num_links;
hive = amdgpu_get_xgmi_hive(psp->adev);
if (WARN_ON(!hive))
return;
list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
struct psp_xgmi_topology_info *mirror_top_info;
int j;


@ -166,6 +166,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
if (ret)
return -EFAULT;
if (ta_bin_len > PSP_1_MEG)
return -EINVAL;
copy_pos += sizeof(uint32_t);
ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);


@ -352,7 +352,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->max_dw = max_dw;
ring->hw_prio = hw_prio;
if (!ring->no_scheduler) {
if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
hw_ip = ring->funcs->type;
num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
@ -469,8 +469,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
int r, i;
uint32_t value, result, early[3];
loff_t i;
int r;
if (*pos & 3 || size & 3)
return -EINVAL;


@ -135,6 +135,10 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
mutex_unlock(&psp->securedisplay_context.mutex);
break;
case 2:
if (size < 3 || phy_id >= TA_SECUREDISPLAY_MAX_PHY) {
dev_err(adev->dev, "Invalid input: %s\n", str);
return -EINVAL;
}
mutex_lock(&psp->securedisplay_context.mutex);
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);


@ -135,6 +135,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
}
/* from vcn4 and above, only unified queue is used */
adev->vcn.using_unified_queue =
adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0);
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@ -259,18 +263,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
return 0;
}
/* from vcn4 and above, only unified queue is used */
static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool ret = false;
if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
ret = true;
return ret;
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
@ -380,7 +372,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
!adev->vcn.using_unified_queue) {
struct dpg_pause_state new_state;
if (fence[j] ||
@ -426,7 +420,9 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
!adev->vcn.using_unified_queue) {
struct dpg_pause_state new_state;
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
@ -452,8 +448,12 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
!adev->vcn.using_unified_queue)
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
atomic_dec(&ring->adev->vcn.total_submission_cnt);
@ -707,12 +707,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
bool sq = amdgpu_vcn_using_unified_queue(ring);
uint32_t *ib_checksum;
uint32_t ib_pack_in_dw;
int i, r;
if (sq)
if (adev->vcn.using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@ -725,7 +724,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
ib->length_dw = 0;
/* single queue headers */
if (sq) {
if (adev->vcn.using_unified_queue) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@ -744,7 +743,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
if (sq)
if (adev->vcn.using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
r = amdgpu_job_submit_direct(job, ring, &f);
@ -834,15 +833,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
struct dma_fence **fence)
{
unsigned int ib_size_dw = 16;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint32_t *ib_checksum = NULL;
uint64_t addr;
bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
if (sq)
if (adev->vcn.using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@ -856,7 +855,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
ib->length_dw = 0;
if (sq)
if (adev->vcn.using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -878,7 +877,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
if (sq)
if (adev->vcn.using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
@@ -901,15 +900,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
struct dma_fence **fence)
{
unsigned int ib_size_dw = 16;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint32_t *ib_checksum = NULL;
uint64_t addr;
bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
if (sq)
if (adev->vcn.using_unified_queue)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -923,7 +922,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
ib->length_dw = 0;
if (sq)
if (adev->vcn.using_unified_queue)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -945,7 +944,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
if (sq)
if (adev->vcn.using_unified_queue)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);


@@ -284,6 +284,7 @@ struct amdgpu_vcn {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
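/* set when the VCN IP (4.0 and newer) uses a single unified queue */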
bool using_unified_queue;
};
struct amdgpu_fw_shared_rb_ptrs_struct {


@@ -615,7 +615,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
vf2pf_info->checksum =
amd_sriov_msg_checksum(
vf2pf_info, vf2pf_info->header.size, 0, 0);
vf2pf_info, sizeof(*vf2pf_info), 0, 0);
return 0;
}
@@ -998,6 +998,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
return 0;
}
if (amdgpu_device_skip_hw_access(adev))
return 0;
reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
@@ -1073,6 +1076,9 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
{
u32 rlcg_flag;
if (amdgpu_device_skip_hw_access(adev))
return;
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
@@ -1090,6 +1096,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
{
u32 rlcg_flag;
if (amdgpu_device_skip_hw_access(adev))
return 0;
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);


@@ -766,11 +766,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_bo_base *entry)
{
struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
struct amdgpu_bo *bo = parent->bo, *pbo;
struct amdgpu_bo *bo, *pbo;
struct amdgpu_vm *vm = params->vm;
uint64_t pde, pt, flags;
unsigned int level;
if (WARN_ON(!parent))
return -EINVAL;
bo = parent->bo;
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;


@@ -500,6 +500,12 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
dev_err(adev->dev,
"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
adev->gmc.num_mem_partitions);
return -EINVAL;
}
} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
dev_err(adev->dev,
"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",


@@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
int fb_channel_number;
fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
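/* fall back to the first table entry if the reported channel number is out of range */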
if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
fb_channel_number = 0;
return df_v1_7_channel_number[fb_channel_number];
}


@@ -7892,22 +7892,15 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid)
{
u32 reg, data;
u32 data;
/* not for *_SOC15 */
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
data = RREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
if (amdgpu_sriov_is_pp_one_vf(adev))
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)


@@ -4961,23 +4961,16 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
u32 data;
amdgpu_gfx_off_ctrl(adev, false);
reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
data = RREG32(reg);
data = RREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
if (amdgpu_sriov_is_pp_one_vf(adev))
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
amdgpu_gfx_off_ctrl(adev, true);
}


@@ -39,7 +39,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
char fw_name[45];
char ucode_prefix[30];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;


@@ -543,11 +543,11 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));


@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
@@ -769,11 +770,15 @@ static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
if (ring->funcs->parse_cs)
amdgpu_ring_write(ring, 0);
else
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, (vmid | (vmid << 4) | (vmid << 8)));
amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
@@ -1052,6 +1057,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
.parse_cs = jpeg_v4_0_3_dec_ring_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1216,3 +1222,56 @@ static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
adev->jpeg.ras = &jpeg_v4_0_3_ras;
}
/**
* jpeg_v4_0_3_dec_ring_parse_cs - command submission parser
*
* @parser: Command submission parser context
* @job: the job to parse
* @ib: the IB to parse
*
* Parse the command stream, return -EINVAL for invalid packet,
* 0 otherwise
*/
int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
uint32_t i, reg, res, cond, type;
struct amdgpu_device *adev = parser->adev;
for (i = 0; i < ib->length_dw ; i += 2) {
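/* the IB is walked as header dword + data dword pairs, hence the stride of 2 */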
reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
res = CP_PACKETJ_GET_RES(ib->ptr[i]);
cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
if (res) /* only support 0 at the moment */
return -EINVAL;
switch (type) {
case PACKETJ_TYPE0:
if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE3:
if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END) {
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
}
break;
case PACKETJ_TYPE6:
if (ib->ptr[i] == CP_PACKETJ_NOP)
continue;
dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
return -EINVAL;
default:
dev_err(adev->dev, "Unknown packet type %d !\n", type);
return -EINVAL;
}
}
return 0;
}


@@ -46,6 +46,12 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
#define JPEG_REG_RANGE_START 0x4000
#define JPEG_REG_RANGE_END 0x41c2
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
int jpeg_v4_0_3_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_job *job,
struct amdgpu_ib *ib);
#endif /* __JPEG_V4_0_3_H__ */


@@ -384,7 +384,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
else
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
if (!ras->disable_ras_err_cnt_harvest) {
if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
/*
* clear error status after ras_controller_intr
* according to hw team and count ue number


@@ -76,6 +76,12 @@
((cond & 0xF) << 24) | \
((type & 0xF) << 28))
#define CP_PACKETJ_NOP 0x60000000
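/* extract the reg, res, cond and type fields from a PACKETJ header dword */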
#define CP_PACKETJ_GET_REG(x) ((x) & 0x3FFFF)
#define CP_PACKETJ_GET_RES(x) (((x) >> 18) & 0x3F)
#define CP_PACKETJ_GET_COND(x) (((x) >> 24) & 0xF)
#define CP_PACKETJ_GET_TYPE(x) (((x) >> 28) & 0xF)
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11


@@ -1432,17 +1432,23 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
goto sync_memory_failed;
}
}
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (WARN_ON_ONCE(!peer_pdd))
continue;
if (flush_tlb)
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
if (err)
goto sync_memory_failed;
}
mutex_unlock(&p->mutex);
if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (WARN_ON_ONCE(!peer_pdd))
continue;
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
}
}
kfree(devices_arr);
return 0;


@@ -42,8 +42,6 @@
#define CRAT_OEMTABLEID_LENGTH 8
#define CRAT_RESERVED_LENGTH 6
#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
/* Compute Unit flags */
#define COMPUTE_UNIT_CPU (1 << 0) /* Create Virtual CRAT for CPU */
#define COMPUTE_UNIT_GPU (1 << 1) /* Create Virtual CRAT for GPU */


@@ -103,7 +103,8 @@ void debug_event_write_work_handler(struct work_struct *work)
struct kfd_process,
debug_event_workarea);
kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
if (process->debug_trap_enabled && process->dbg_ev_file)
kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
}
/* update process/device/queue exception status, write to descriptor
@@ -645,6 +646,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target)
else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
cancel_work_sync(&target->debug_event_workarea);
fput(target->dbg_ev_file);
target->dbg_ev_file = NULL;


@@ -28,6 +28,7 @@
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"
static inline struct process_queue_node *get_queue_by_qid(
struct process_queue_manager *pqm, unsigned int qid)
@@ -87,8 +88,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
return;
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
if (dev->kfd->shared_resources.enable_mes)
amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
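/* flush the MES shader debugger only when no GPU reset is in progress (read trylock on the reset domain) */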
if (dev->kfd->shared_resources.enable_mes &&
down_read_trylock(&dev->adev->reset_domain->sem)) {
amdgpu_mes_flush_shader_debugger(dev->adev,
pdd->proc_ctx_gpu_addr);
up_read(&dev->adev->reset_domain->sem);
}
pdd->already_dequeued = true;
}


@@ -958,8 +958,7 @@ static void kfd_update_system_properties(void)
dev = list_last_entry(&topology_device_list,
struct kfd_topology_device, list);
if (dev) {
sys_props.platform_id =
(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
sys_props.platform_id = dev->oem_id64;
sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
sys_props.platform_rev = dev->oem_revision;
}


@@ -154,7 +154,10 @@ struct kfd_topology_device {
struct attribute attr_gpuid;
struct attribute attr_name;
struct attribute attr_props;
uint8_t oem_id[CRAT_OEMID_LENGTH];
union {
uint8_t oem_id[CRAT_OEMID_LENGTH];
uint64_t oem_id64;
};
uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
uint32_t oem_revision;
};

Some files were not shown because too many files have changed in this diff.