This is the 6.1.36 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmSb+ZMACgkQONu9yGCS
 aT7qORAAmbYIAtIdWp+3wAM8g9TihQAeknz6f7Q8sxUB1EkZKJ9TgnFdR1IPPcBI
 BWMUNutyUtHY90XTlZbVM04P070FafWjyT23+sdNG+pZGv+sOJkTHO6NgvkFlt0m
 doOq9ojOe6hXS5oqK+8grfbwBG0VDUv7HHuUKsGBLhlDAHP58sVqtkrpiK2EiJpx
 WGIR1t7gPd7jIxsnWTSurdjGfrAUw3SmE07K6sjwGgHsc2Mvd5vluQ+ljnmlz2qd
 3WMyHymIhNP69/HY5Zz6sqCNGJ0eglp6IP8VPw9a7eGDu1UNp2Gu+P5ZB4FR7ABg
 Rbsvrkr/08S9on0OSFiYJ11sfbzdIb4AfGdSHnUpeuqBp5ak1JS5jE6eSiy9YZU/
 V9wDFdlDDwwORCWTMJzcTvhtlzWI+BkKq0bZEiYSxeCZ6m5RKi0i6X/lOPFt/ihA
 PfEHGZVZ12atEEnYm5iich8Frqyp3nOYJKR972/zeKkkcWoYslFA6KuA3PA3eV7S
 rdbz3hK6T3kZTe7FUvmghdi1lGgIKYy8IOiqY9tbMHWa3YQ7k5ZA2BZOiCEri0RF
 tfzT1wI4DknbEXv5fs5PQ8c8eYMXaFKxdZ4+ndfB7f/jPn6IEK2xb5VtbnLe/NIE
 qeRtanzccoKh8P7CmnwWqQ4CaqVeZTFrQ3jiadptSbpTnt3qzlM=
 =oSd/
 -----END PGP SIGNATURE-----

Merge tag 'v6.1.36' into lf-6.1.y

This is the 6.1.36 stable release

* tag 'v6.1.36': (171 commits)
  Linux 6.1.36
  smb: move client and server files to common directory fs/smb
  i2c: imx-lpi2c: fix type char overflow issue when calculating the clock cycle
  ...

Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>

Conflicts:
       drivers/spi/spi-fsl-lpspi.c
       drivers/tty/serial/fsl_lpuart.c
       sound/soc/fsl/fsl_sai.c
commit c28d05153a
Author: Jason Liu <jason.hui.liu@nxp.com>
Date:   2023-06-29 14:23:35 -05:00

311 changed files with 2156 additions and 806 deletions


@@ -5183,8 +5183,8 @@ S: Supported
W: https://wiki.samba.org/index.php/LinuxCIFS
T: git git://git.samba.org/sfrench/cifs-2.6.git
F: Documentation/admin-guide/cifs/
F: fs/cifs/
F: fs/smbfs_common/
F: fs/smb/client/
F: fs/smb/common/
F: include/uapi/linux/cifs
COMPACTPCI HOTPLUG CORE
@@ -11178,8 +11178,8 @@ L: linux-cifs@vger.kernel.org
S: Maintained
T: git git://git.samba.org/ksmbd.git
F: Documentation/filesystems/cifs/ksmbd.rst
F: fs/ksmbd/
F: fs/smbfs_common/
F: fs/smb/common/
F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendanhiggins@google.com>


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 35
SUBLEVEL = 36
EXTRAVERSION =
NAME = Curry Ramen


@@ -527,7 +527,7 @@
interrupt-parent = <&gpio1>;
interrupts = <31 0>;
pendown-gpio = <&gpio1 31 0>;
pendown-gpio = <&gpio1 31 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;


@@ -156,7 +156,7 @@
compatible = "ti,ads7843";
interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>;
spi-max-frequency = <3000000>;
pendown-gpio = <&pioC 2 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&pioC 2 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <150>;
ti,x-max = /bits/ 16 <3830>;


@@ -64,7 +64,7 @@
interrupt-parent = <&gpio2>;
interrupts = <7 0>;
spi-max-frequency = <1000000>;
pendown-gpio = <&gpio2 7 0>;
pendown-gpio = <&gpio2 7 GPIO_ACTIVE_LOW>;
vcc-supply = <&reg_3p3v>;
ti,x-min = /bits/ 16 <0>;
ti,x-max = /bits/ 16 <4095>;


@@ -237,7 +237,7 @@
pinctrl-0 = <&pinctrl_tsc2046_pendown>;
interrupt-parent = <&gpio2>;
interrupts = <29 0>;
pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>;
touchscreen-max-pressure = <255>;
wakeup-source;
};


@@ -227,7 +227,7 @@
interrupt-parent = <&gpio2>;
interrupts = <25 0>; /* gpio_57 */
pendown-gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio2 25 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;


@@ -54,7 +54,7 @@
interrupt-parent = <&gpio1>;
interrupts = <27 0>; /* gpio_27 */
pendown-gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio1 27 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;


@@ -311,7 +311,7 @@
interrupt-parent = <&gpio1>;
interrupts = <8 0>; /* boot6 / gpio_8 */
spi-max-frequency = <1000000>;
pendown-gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>;
vcc-supply = <&reg_vcc3>;
pinctrl-names = "default";
pinctrl-0 = <&tsc2048_pins>;


@@ -149,7 +149,7 @@
interrupt-parent = <&gpio4>;
interrupts = <18 0>; /* gpio_114 */
pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;


@@ -160,7 +160,7 @@
interrupt-parent = <&gpio4>;
interrupts = <18 0>; /* gpio_114 */
pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;
ti,x-max = /bits/ 16 <0x0fff>;


@@ -651,7 +651,7 @@
pinctrl-0 = <&penirq_pins>;
interrupt-parent = <&gpio3>;
interrupts = <30 IRQ_TYPE_NONE>; /* GPIO_94 */
pendown-gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio3 30 GPIO_ACTIVE_LOW>;
vcc-supply = <&vaux4>;
ti,x-min = /bits/ 16 <0>;


@@ -354,7 +354,7 @@
interrupt-parent = <&gpio1>;
interrupts = <15 0>; /* gpio1_wk15 */
pendown-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>;
pendown-gpio = <&gpio1 15 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <0x0>;


@@ -488,7 +488,6 @@
wcd_rx: codec@0,4 {
compatible = "sdw20217010d00";
reg = <0 4>;
#sound-dai-cells = <1>;
qcom,rx-port-mapping = <1 2 3 4 5>;
};
};
@@ -499,7 +498,6 @@
wcd_tx: codec@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
#sound-dai-cells = <1>;
qcom,tx-port-mapping = <1 2 3 4>;
};
};


@@ -418,7 +418,6 @@
wcd_rx: codec@0,4 {
compatible = "sdw20217010d00";
reg = <0 4>;
#sound-dai-cells = <1>;
qcom,rx-port-mapping = <1 2 3 4 5>;
};
};
@@ -427,7 +426,6 @@
wcd_tx: codec@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
#sound-dai-cells = <1>;
qcom,tx-port-mapping = <1 2 3 4>;
};
};


@@ -28,6 +28,16 @@
regulator-max-microvolt = <5000000>;
vin-supply = <&vcc12v_dcin>;
};
vcc_sd_pwr: vcc-sd-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc_sd_pwr";
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc3v3_sys>;
};
};
&gmac1 {
@@ -119,13 +129,7 @@
};
&sdmmc0 {
vmmc-supply = <&sdmmc_pwr>;
status = "okay";
};
&sdmmc_pwr {
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vmmc-supply = <&vcc_sd_pwr>;
status = "okay";
};


@@ -92,16 +92,6 @@
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc5v0_sys>;
};
sdmmc_pwr: sdmmc-pwr-regulator {
compatible = "regulator-fixed";
enable-active-high;
gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_pwr_h>;
regulator-name = "sdmmc_pwr";
status = "disabled";
};
};
&cpu0 {
@@ -143,6 +133,24 @@
status = "disabled";
};
&gpio0 {
nextrst-hog {
gpio-hog;
/*
* GPIO_ACTIVE_LOW + output-low here means that the pin is set
* to high, because output-low decides the value pre-inversion.
*/
gpios = <RK_PA5 GPIO_ACTIVE_LOW>;
line-name = "nEXTRST";
output-low;
};
};
&gpu {
mali-supply = <&vdd_gpu>;
status = "okay";
};
&i2c0 {
status = "okay";
@@ -480,12 +488,6 @@
rockchip,pins = <2 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdmmc-pwr {
sdmmc_pwr_h: sdmmc-pwr-h {
rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
&pmu_io_domains {


@@ -94,9 +94,10 @@
power-domains = <&power RK3568_PD_PIPE>;
reg = <0x3 0xc0400000 0x0 0x00400000>,
<0x0 0xfe270000 0x0 0x00010000>,
<0x3 0x7f000000 0x0 0x01000000>;
ranges = <0x01000000 0x0 0x3ef00000 0x3 0x7ef00000 0x0 0x00100000>,
<0x02000000 0x0 0x00000000 0x3 0x40000000 0x0 0x3ef00000>;
<0x0 0xf2000000 0x0 0x00100000>;
ranges = <0x01000000 0x0 0xf2100000 0x0 0xf2100000 0x0 0x00100000>,
<0x02000000 0x0 0xf2200000 0x0 0xf2200000 0x0 0x01e00000>,
<0x03000000 0x0 0x40000000 0x3 0x40000000 0x0 0x40000000>;
reg-names = "dbi", "apb", "config";
resets = <&cru SRST_PCIE30X1_POWERUP>;
reset-names = "pipe";
@@ -146,9 +147,10 @@
power-domains = <&power RK3568_PD_PIPE>;
reg = <0x3 0xc0800000 0x0 0x00400000>,
<0x0 0xfe280000 0x0 0x00010000>,
<0x3 0xbf000000 0x0 0x01000000>;
ranges = <0x01000000 0x0 0x3ef00000 0x3 0xbef00000 0x0 0x00100000>,
<0x02000000 0x0 0x00000000 0x3 0x80000000 0x0 0x3ef00000>;
<0x0 0xf0000000 0x0 0x00100000>;
ranges = <0x01000000 0x0 0xf0100000 0x0 0xf0100000 0x0 0x00100000>,
<0x02000000 0x0 0xf0200000 0x0 0xf0200000 0x0 0x01e00000>,
<0x03000000 0x0 0x40000000 0x3 0x80000000 0x0 0x40000000>;
reg-names = "dbi", "apb", "config";
resets = <&cru SRST_PCIE30X2_POWERUP>;
reset-names = "pipe";


@@ -951,7 +951,7 @@
compatible = "rockchip,rk3568-pcie";
reg = <0x3 0xc0000000 0x0 0x00400000>,
<0x0 0xfe260000 0x0 0x00010000>,
<0x3 0x3f000000 0x0 0x01000000>;
<0x0 0xf4000000 0x0 0x00100000>;
reg-names = "dbi", "apb", "config";
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
@@ -981,8 +981,9 @@
phys = <&combphy2 PHY_TYPE_PCIE>;
phy-names = "pcie-phy";
power-domains = <&power RK3568_PD_PIPE>;
ranges = <0x01000000 0x0 0x3ef00000 0x3 0x3ef00000 0x0 0x00100000
0x02000000 0x0 0x00000000 0x3 0x00000000 0x0 0x3ef00000>;
ranges = <0x01000000 0x0 0xf4100000 0x0 0xf4100000 0x0 0x00100000>,
<0x02000000 0x0 0xf4200000 0x0 0xf4200000 0x0 0x01e00000>,
<0x03000000 0x0 0x40000000 0x3 0x00000000 0x0 0x40000000>;
resets = <&cru SRST_PCIE20_POWERUP>;
reset-names = "pipe";
#address-cells = <3>;


@@ -111,8 +111,14 @@
#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4)
#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6)
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
/*
* Automatically generated definitions for system registers, the


@@ -81,7 +81,12 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* EL1 instead of being trapped to EL2.
*/
if (kvm_arm_support_pmu_v3()) {
struct kvm_cpu_context *hctxt;
write_sysreg(0, pmselr_el0);
hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
}
@@ -105,8 +110,12 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
write_sysreg(0, hstr_el2);
if (kvm_arm_support_pmu_v3())
write_sysreg(0, pmuserenr_el0);
if (kvm_arm_support_pmu_v3()) {
struct kvm_cpu_context *hctxt;
hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
}
if (cpus_have_final_cap(ARM64_SME)) {
sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,


@@ -446,6 +446,7 @@ int vgic_lazy_init(struct kvm *kvm)
int kvm_vgic_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
enum vgic_type type;
gpa_t dist_base;
int ret = 0;
@@ -460,10 +461,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
if (!irqchip_in_kernel(kvm))
goto out;
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
ret = vgic_v2_map_resources(kvm);
else
type = VGIC_V2;
} else {
ret = vgic_v3_map_resources(kvm);
type = VGIC_V3;
}
if (ret) {
__kvm_vgic_destroy(kvm);
@@ -473,8 +477,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base,
kvm_vgic_global_state.type);
ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);


@@ -26,6 +26,7 @@ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
KBUILD_CFLAGS += -fno-stack-protector
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))


@@ -97,7 +97,10 @@ static void init_x2apic_ldr(void)
static int x2apic_phys_probe(void)
{
if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
if (!x2apic_mode)
return 0;
if (x2apic_phys || x2apic_fadt_phys())
return 1;
return apic == &apic_x2apic_phys;


@@ -172,10 +172,10 @@ void __meminit init_trampoline_kaslr(void)
set_p4d(p4d_tramp,
__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));
set_pgd(&trampoline_pgd_entry,
__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
trampoline_pgd_entry =
__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
} else {
set_pgd(&trampoline_pgd_entry,
__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
trampoline_pgd_entry =
__pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
}
}


@@ -2478,7 +2478,7 @@ out_image:
}
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, image);
bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
if (image) {
if (!prog->is_func || extra_pass) {


@@ -101,8 +101,6 @@ acpi_status
acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_event_status *event_status);
acpi_status acpi_hw_disable_all_gpes(void);
acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);


@@ -636,11 +636,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
}
/*
* Disable and clear GPE status before interrupt is enabled. Some GPEs
* (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
* acpi_leave_sleep_state will reenable specific GPEs later
* Disable all GPE and clear their status bits before interrupts are
* enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
* prevent them from producing spurious interrupts.
*
* acpi_leave_sleep_state() will reenable specific GPEs later.
*
* Because this code runs on one CPU with disabled interrupts (all of
* the other CPUs are offline at this time), it need not acquire any
* sleeping locks which may trigger an implicit preemption point even
* if there is no contention, so avoid doing that by using a low-level
* library routine here.
*/
acpi_disable_all_gpes();
acpi_hw_disable_all_gpes();
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions();


@@ -5313,7 +5313,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
mutex_init(&ap->scsi_scan_mutex);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
init_completion(&ap->park_req_pending);
@@ -5919,6 +5919,7 @@ static void ata_port_detach(struct ata_port *ap)
WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task);
skip_eh:
/* clean up zpodd on port removal */


@@ -2981,7 +2981,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
ehc->i.flags |= ATA_EHI_SETMODE;
/* schedule the scsi_rescan_device() here */
schedule_work(&(ap->scsi_rescan_task));
schedule_delayed_work(&ap->scsi_rescan_task, 0);
} else if (dev->class == ATA_DEV_UNKNOWN &&
ehc->tries[dev->devno] &&
ata_class_enabled(ehc->classes[dev->devno])) {


@@ -4604,10 +4604,11 @@ int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
void ata_scsi_dev_rescan(struct work_struct *work)
{
struct ata_port *ap =
container_of(work, struct ata_port, scsi_rescan_task);
container_of(work, struct ata_port, scsi_rescan_task.work);
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
bool delay_rescan = false;
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
@@ -4621,6 +4622,21 @@ void ata_scsi_dev_rescan(struct work_struct *work)
if (scsi_device_get(sdev))
continue;
/*
* If the rescan work was scheduled because of a resume
* event, the port is already fully resumed, but the
* SCSI device may not yet be fully resumed. In such
* case, executing scsi_rescan_device() may cause a
* deadlock with the PM code on device_lock(). Prevent
* this by giving up and retrying rescan after a short
* delay.
*/
delay_rescan = sdev->sdev_gendev.power.is_suspended;
if (delay_rescan) {
scsi_device_put(sdev);
break;
}
spin_unlock_irqrestore(ap->lock, flags);
scsi_rescan_device(&(sdev->sdev_gendev));
scsi_device_put(sdev);
@@ -4630,4 +4646,8 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
if (delay_rescan)
schedule_delayed_work(&ap->scsi_rescan_task,
msecs_to_jiffies(5));
}


@@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
.max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
.free_context = spi_avmm_bridge_ctx_free,
};


@@ -2194,6 +2194,7 @@ static void null_destroy_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
null_del_dev(nullb);
null_free_device_storage(dev, false);
null_free_dev(dev);
}


@@ -98,6 +98,8 @@ struct crb_priv {
u8 __iomem *rsp;
u32 cmd_size;
u32 smc_func_id;
u32 __iomem *pluton_start_addr;
u32 __iomem *pluton_reply_addr;
};
struct tpm2_crb_smc {
@@ -108,6 +110,11 @@ struct tpm2_crb_smc {
u32 smc_func_id;
};
struct tpm2_crb_pluton {
u64 start_addr;
u64 reply_addr;
};
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
unsigned long timeout)
{
@@ -127,6 +134,25 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
return ((ioread32(reg) & mask) == value);
}
static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
{
if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
return 0;
if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C))
return -ETIME;
iowrite32(1, priv->pluton_start_addr);
if (wait_for_complete == false)
return 0;
if (!crb_wait_for_reg_32(priv->pluton_start_addr,
0xffffffff, 0, 200))
return -ETIME;
return 0;
}
/**
* __crb_go_idle - request tpm crb device to go the idle state
*
@@ -145,6 +171,8 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*/
static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
int rc;
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
@@ -152,6 +180,10 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
CRB_CTRL_REQ_GO_IDLE/* mask */,
0, /* value */
@@ -188,12 +220,19 @@ static int crb_go_idle(struct tpm_chip *chip)
*/
static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
int rc;
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
CRB_CTRL_REQ_CMD_READY /* mask */,
0, /* value */
@@ -371,6 +410,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
return -E2BIG;
}
/* Seems to be necessary for every command */
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
__crb_cmd_ready(&chip->dev, priv);
memcpy_toio(priv->cmd, buf, len);
/* Make sure that cmd is populated before issuing start. */
@@ -394,7 +437,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
return rc;
if (rc)
return rc;
return crb_try_pluton_doorbell(priv, false);
}
static void crb_cancel(struct tpm_chip *chip)
@@ -524,15 +570,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
return ret;
acpi_dev_free_resource_list(&acpi_resource_list);
if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
} else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
IORESOURCE_MEM) {
dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
memset(iores_array + TPM_CRB_MAX_RESOURCES,
0, sizeof(*iores_array));
iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
/* Pluton doesn't appear to define ACPI memory regions */
if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
} else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
IORESOURCE_MEM) {
dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
memset(iores_array + TPM_CRB_MAX_RESOURCES,
0, sizeof(*iores_array));
iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
}
}
iores = NULL;
@@ -656,6 +705,22 @@ out_relinquish_locality:
return ret;
}
static int crb_map_pluton(struct device *dev, struct crb_priv *priv,
struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton)
{
priv->pluton_start_addr = crb_map_res(dev, NULL, NULL,
crb_pluton->start_addr, 4);
if (IS_ERR(priv->pluton_start_addr))
return PTR_ERR(priv->pluton_start_addr);
priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL,
crb_pluton->reply_addr, 4);
if (IS_ERR(priv->pluton_reply_addr))
return PTR_ERR(priv->pluton_reply_addr);
return 0;
}
static int crb_acpi_add(struct acpi_device *device)
{
struct acpi_table_tpm2 *buf;
@@ -663,6 +728,7 @@ static int crb_acpi_add(struct acpi_device *device)
struct tpm_chip *chip;
struct device *dev = &device->dev;
struct tpm2_crb_smc *crb_smc;
struct tpm2_crb_pluton *crb_pluton;
acpi_status status;
u32 sm;
int rc;
@@ -700,6 +766,20 @@ static int crb_acpi_add(struct acpi_device *device)
priv->smc_func_id = crb_smc->smc_func_id;
}
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
dev_err(dev,
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
return -EINVAL;
}
crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
rc = crb_map_pluton(dev, priv, buf, crb_pluton);
if (rc)
return rc;
}
priv->sm = sm;
priv->hid = acpi_device_hid(device);


@@ -772,7 +772,9 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
wake_up_interruptible(&priv->int_queue);
/* Clear interrupts handled with TPM_EOI */
tpm_tis_request_locality(chip, 0);
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
tpm_tis_relinquish_locality(chip, 0);
if (rc < 0)
return IRQ_NONE;


@@ -221,8 +221,12 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
for (i = 0; i < ngpio; i++)
chip->irq_number[i] = platform_get_irq(pdev, i);
for (i = 0; i < ngpio; i++) {
ret = platform_get_irq(pdev, i);
if (ret < 0)
return ret;
chip->irq_number[i] = ret;
}
ret = bgpio_init(&chip->gc, dev, 4,
chip->base + SIFIVE_GPIO_INPUT_VAL,


@@ -1650,7 +1650,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
}
/* Remove all IRQ mappings and delete the domain */
if (gc->irq.domain) {
if (!gc->irq.domain_is_allocated_externally && gc->irq.domain) {
unsigned int irq;
for (offset = 0; offset < gc->ngpio; offset++) {
@@ -1696,6 +1696,15 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
gc->to_irq = gpiochip_to_irq;
gc->irq.domain = domain;
gc->irq.domain_is_allocated_externally = true;
/*
* Using barrier() here to prevent compiler from reordering
* gc->irq.initialized before adding irqdomain.
*/
barrier();
gc->irq.initialized = true;
return 0;
}


@@ -348,6 +348,35 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
return false;
}
/**
* update_planes_and_stream_adapter() - Send planes to be updated in DC
*
* DC has a generic way to update planes and stream via
* dc_update_planes_and_stream function; however, DM might need some
* adjustments and preparation before calling it. This function is a wrapper
* for the dc_update_planes_and_stream that does any required configuration
* before passing control to DC.
*/
static inline bool update_planes_and_stream_adapter(struct dc *dc,
int update_type,
int planes_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_surface_update *array_of_surface_update)
{
/*
* Previous frame finished and HW is ready for optimization.
*/
if (update_type == UPDATE_TYPE_FAST)
dc_post_update_surfaces_to_stream(dc);
return dc_update_planes_and_stream(dc,
array_of_surface_update,
planes_count,
stream,
stream_update);
}
/**
* dm_pflip_high_irq() - Handle pageflip interrupt
* @interrupt_params: ignored
@@ -2632,10 +2661,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
bundle->surface_updates[m].surface->force_full_update =
true;
}
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
dc_state->streams[k], &bundle->stream_update, dc_state);
update_planes_and_stream_adapter(dm->dc,
UPDATE_TYPE_FULL,
dc_state->stream_status->plane_count,
dc_state->streams[k],
&bundle->stream_update,
bundle->surface_updates);
}
cleanup:
@@ -7870,6 +7902,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
mutex_unlock(&dm->dc_lock);
/*
* If FreeSync state on the stream has changed then we need to
* re-adjust the min/max bounds now that DC doesn't handle this
@@ -7883,16 +7921,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
update_planes_and_stream_adapter(dm->dc,
acrtc_state->update_type,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
bundle->surface_updates);
/**
* Enable or disable the interrupts on the backend.
@@ -8334,12 +8368,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
dummy_updates,
status->plane_count,
dm_new_crtc_state->stream,
&stream_update,
dc_state);
dc_update_planes_and_stream(dm->dc,
dummy_updates,
status->plane_count,
dm_new_crtc_state->stream,
&stream_update);
mutex_unlock(&dm->dc_lock);
}


@@ -1335,7 +1335,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
/* Let the runqueue know that there is work to do. */
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
if (runqueue_node->async)
if (req->async)
goto out;
wait_for_completion(&runqueue_node->complete);


@@ -469,8 +469,6 @@ static int vidi_remove(struct platform_device *pdev)
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
return -EINVAL;
}
component_del(&pdev->dev, &vidi_component_ops);


@@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_put(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}


@@ -2417,8 +2417,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail_quirks;
}
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
error = hid_hw_open(hdev);
if (error) {
hid_err(hdev, "hw open failed\n");
goto fail_quirks;
}
}
wacom_set_shared_values(wacom_wac);
devres_close_group(&hdev->dev, wacom);


@@ -829,11 +829,22 @@ static void vmbus_wait_for_unload(void)
if (completion_done(&vmbus_connection.unload_event))
goto completed;
for_each_online_cpu(cpu) {
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
/*
* In a CoCo VM the synic_message_page is not allocated
* in hv_synic_alloc(). Instead it is set/cleared in
* hv_synic_enable_regs() and hv_synic_disable_regs()
* such that it is set only when the CPU is online. If
* not all present CPUs are online, the message page
* might be NULL, so skip such CPUs.
*/
page_addr = hv_cpu->synic_message_page;
if (!page_addr)
continue;
msg = (struct hv_message *)page_addr
+ VMBUS_MESSAGE_SINT;
@@ -867,11 +878,14 @@ completed:
* maybe-pending messages on all CPUs to be able to receive new
* messages after we reconnect.
*/
for_each_online_cpu(cpu) {
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
page_addr = hv_cpu->synic_message_page;
if (!page_addr)
continue;
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
msg->header.message_type = HVMSG_NONE;
}


@@ -1504,7 +1504,7 @@ static int vmbus_bus_init(void)
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
if (ret < 0)
goto err_cpuhp;
goto err_alloc;
hyperv_cpuhp_online = ret;
ret = vmbus_connect();
@@ -1555,9 +1555,8 @@ static int vmbus_bus_init(void)
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
hv_synic_free();
err_alloc:
hv_synic_free();
if (vmbus_irq == -1) {
hv_remove_vmbus_handler();
} else {


@@ -1118,8 +1118,10 @@ static int pci1xxxx_i2c_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_i2c_pm_ops, pci1xxxx_i2c_suspend,
pci1xxxx_i2c_resume);
static void pci1xxxx_i2c_shutdown(struct pci1xxxx_i2c *i2c)
static void pci1xxxx_i2c_shutdown(void *data)
{
struct pci1xxxx_i2c *i2c = data;
pci1xxxx_i2c_config_padctrl(i2c, false);
pci1xxxx_i2c_configure_core_reg(i2c, false);
}
@@ -1156,7 +1158,7 @@ static int pci1xxxx_i2c_probe_pci(struct pci_dev *pdev,
init_completion(&i2c->i2c_xfer_done);
pci1xxxx_i2c_init(i2c);
ret = devm_add_action(dev, (void (*)(void *))pci1xxxx_i2c_shutdown, i2c);
ret = devm_add_action(dev, pci1xxxx_i2c_shutdown, i2c);
if (ret)
return ret;


@@ -108,6 +108,27 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
{} /* Terminating entry */
};
/*
* Some devices have a wrong entry which points to a GPIO which is
* required in another driver, so this driver must not claim it.
*/
static const struct dmi_system_id dmi_invalid_acpi_index[] = {
{
/*
* Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry
* points to a GPIO which is not a home button and which is
* required by the lenovo-yogabook driver.
*/
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)1l,
},
{} /* Terminating entry */
};
/*
* Get the Nth GPIO number from the ACPI object.
*/
@@ -137,6 +158,8 @@ soc_button_device_create(struct platform_device *pdev,
struct platform_device *pd;
struct gpio_keys_button *gpio_keys;
struct gpio_keys_platform_data *gpio_keys_pdata;
const struct dmi_system_id *dmi_id;
int invalid_acpi_index = -1;
int error, gpio, irq;
int n_buttons = 0;
@@ -154,10 +177,17 @@ soc_button_device_create(struct platform_device *pdev,
gpio_keys = (void *)(gpio_keys_pdata + 1);
n_buttons = 0;
dmi_id = dmi_first_match(dmi_invalid_acpi_index);
if (dmi_id)
invalid_acpi_index = (long)dmi_id->driver_data;
for (info = button_info; info->name; info++) {
if (info->autorepeat != autorepeat)
continue;
if (info->acpi_index == invalid_acpi_index)
continue;
error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
if (error || irq < 0) {
/*


@@ -1090,7 +1090,8 @@
mutex_lock(&adap->lock);
dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
adap->last_initiator = 0xff;
if (!adap->transmit_in_progress)
adap->last_initiator = 0xff;
/* Check if this message was for us (directed or broadcast). */
if (!cec_msg_is_broadcast(msg))
@@ -1582,7 +1583,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
*
* This function is called with adap->lock held.
*/
static int cec_adap_enable(struct cec_adapter *adap)
int cec_adap_enable(struct cec_adapter *adap)
{
bool enable;
int ret = 0;
@@ -1592,6 +1593,9 @@ static int cec_adap_enable(struct cec_adapter *adap)
if (adap->needs_hpd)
enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID;
if (adap->devnode.unregistered)
enable = false;
if (enable == adap->is_enabled)
return 0;


@@ -191,6 +191,8 @@ static void cec_devnode_unregister(struct cec_adapter *adap)
mutex_lock(&adap->lock);
__cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
__cec_s_log_addrs(adap, NULL, false);
// Disable the adapter (since adap->devnode.unregistered is true)
cec_adap_enable(adap);
mutex_unlock(&adap->lock);
cdev_device_del(&devnode->cdev, &devnode->dev);


@@ -47,6 +47,7 @@ int cec_monitor_pin_cnt_inc(struct cec_adapter *adap);
void cec_monitor_pin_cnt_dec(struct cec_adapter *adap);
int cec_adap_status(struct seq_file *file, void *priv);
int cec_thread_func(void *_adap);
int cec_adap_enable(struct cec_adapter *adap);
void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
int __cec_s_log_addrs(struct cec_adapter *adap,
struct cec_log_addrs *log_addrs, bool block);


@@ -1411,8 +1411,8 @@ static int bcm2835_probe(struct platform_device *pdev)
host->max_clk = clk_get_rate(clk);
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
ret = -EINVAL;
if (host->irq < 0) {
ret = host->irq;
goto err;
}


@@ -649,6 +649,7 @@ static struct platform_driver litex_mmc_driver = {
.driver = {
.name = "litex-mmc",
.of_match_table = litex_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(litex_mmc_driver);


@@ -1007,11 +1007,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
if (data && !cmd->error)
data->bytes_xfered = data->blksz * data->blocks;
if (meson_mmc_bounce_buf_read(data) ||
meson_mmc_get_next_command(cmd))
ret = IRQ_WAKE_THREAD;
else
ret = IRQ_HANDLED;
return IRQ_WAKE_THREAD;
}
out:
@@ -1023,9 +1020,6 @@ out:
writel(start, host->regs + SD_EMMC_START);
}
if (ret == IRQ_HANDLED)
meson_mmc_request_done(host->mmc, cmd->mrq);
return ret;
}
@@ -1233,8 +1227,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
ret = -EINVAL;
if (host->irq < 0) {
ret = host->irq;
goto free_host;
}


@@ -1735,7 +1735,8 @@ static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
return;
if (host->variant->busy_timeout && mmc->actual_clock)
max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
MSEC_PER_SEC);
mmc->max_busy_timeout = max_busy_timeout;
}


@@ -2658,7 +2658,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
ret = host->irq;
goto host_free;
}


@@ -704,7 +704,7 @@ static int mvsd_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
return irq;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
if (!mmc) {


@@ -1343,7 +1343,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
return irq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->virt_base = devm_ioremap_resource(&pdev->dev, res);


@@ -1791,9 +1791,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (res == NULL || irq < 0)
if (!res)
return -ENXIO;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))


@@ -638,7 +638,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
owl_host->irq = platform_get_irq(pdev, 0);
if (owl_host->irq < 0) {
ret = -EINVAL;
ret = owl_host->irq;
goto err_release_channel;
}


@@ -829,7 +829,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
err = -EINVAL;
err = host->irq;
goto err_free;
}


@@ -2486,6 +2486,9 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
msm_host->ddr_config = DDR_CONFIG_POR_VAL;
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
}
static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)


@@ -65,8 +65,8 @@ static int sdhci_probe(struct platform_device *pdev)
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
ret = -EINVAL;
if (host->irq < 0) {
ret = host->irq;
goto err_host;
}
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;


@@ -1400,7 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq_optional(pdev, 1);
if (irq[0] < 0)
return -ENXIO;
return irq[0];
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))


@@ -1350,8 +1350,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return ret;
host->irq = platform_get_irq(pdev, 0);
if (host->irq <= 0) {
ret = -EINVAL;
if (host->irq < 0) {
ret = host->irq;
goto error_disable_mmc;
}


@@ -1757,8 +1757,10 @@ static int usdhi6_probe(struct platform_device *pdev)
irq_cd = platform_get_irq_byname(pdev, "card detect");
irq_sd = platform_get_irq_byname(pdev, "data");
irq_sdio = platform_get_irq_byname(pdev, "SDIO");
if (irq_sd < 0 || irq_sdio < 0)
return -ENODEV;
if (irq_sd < 0)
return irq_sd;
if (irq_sdio < 0)
return irq_sdio;
mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
if (!mmc)


@@ -998,6 +998,18 @@ unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
static void
mt753x_trap_frames(struct mt7530_priv *priv)
{
/* Trap BPDUs to the CPU port(s) */
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
MT753X_BPDU_CPU_ONLY);
/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}
static int
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
@@ -1020,7 +1032,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
UNU_FFP(BIT(port)));
/* Set CPU port number */
if (priv->id == ID_MT7621)
if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
/* CPU port gets connected to all user ports of
@@ -2219,6 +2231,8 @@ mt7530_setup(struct dsa_switch *ds)
priv->p6_interface = PHY_INTERFACE_MODE_NA;
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
@@ -2325,8 +2339,8 @@ mt7531_setup_common(struct dsa_switch *ds)
BIT(cpu_dp->index));
break;
}
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
MT753X_BPDU_CPU_ONLY);
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);


@@ -65,6 +65,11 @@ enum mt753x_id {
#define MT753X_BPC 0x24
#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
enum mt753x_bpdu_port_fw {
MT753X_BPDU_FOLLOW_MFC,
MT753X_BPDU_CPU_EXCLUDE = 4,


@@ -1136,8 +1136,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
if (skb->len <= 60 &&
(lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
is_ipv4_pkt(skb)) {
(lancer_chip(adapter) || BE3_chip(adapter) ||
skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
}


@@ -1200,9 +1200,13 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
u8 *hw_actions;
int ret;
hw_actions = kzalloc(ACTION_CACHE_LINE_SIZE, GFP_KERNEL);
if (!hw_actions)
return -ENOMEM;
ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
data, data_sz,
hw_actions,
@@ -1210,6 +1214,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
&action->rewrite->num_of_actions);
if (ret) {
mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
kfree(hw_actions);
return ret;
}
@@ -1217,6 +1222,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
DR_CHUNK_SIZE_8);
if (!action->rewrite->chunk) {
mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
kfree(hw_actions);
return -ENOMEM;
}
@@ -1230,6 +1236,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
if (ret) {
mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
mlx5dr_icm_free_chunk(action->rewrite->chunk);
kfree(hw_actions);
return ret;
}
return 0;


@@ -582,8 +582,7 @@ qcaspi_spi_thread(void *data)
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
if ((qca->intr_req == qca->intr_svc) &&
(qca->txr.skb[qca->txr.head] == NULL) &&
(qca->sync == QCASPI_SYNC_READY))
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);


@@ -2950,7 +2950,7 @@ static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
return tstamp;
}
static void
static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
@@ -2958,13 +2958,14 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
unsigned int tx_ev_type;
int work_done;
u64 ts_part;
if (unlikely(READ_ONCE(efx->reset_pending)))
return;
return 0;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
return;
return 0;
/* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
@@ -2973,8 +2974,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
if (!tx_queue->timestamping) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
return;
return efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
}
/* Transmit timestamps are only available for 8XXX series. They result
@@ -3000,6 +3000,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
* fields in the event.
*/
tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
work_done = 0;
switch (tx_ev_type) {
case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
@@ -3016,6 +3017,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_queue->completed_timestamp_major = ts_part;
efx_xmit_done_single(tx_queue);
work_done = 1;
break;
default:
@@ -3026,6 +3028,8 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_VAL(*event));
break;
}
return work_done;
}
static void
@@ -3081,13 +3085,16 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
}
}
#define EFX_NAPI_MAX_TX 512
static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
struct efx_nic *efx = channel->efx;
efx_qword_t event, *p_event;
unsigned int read_ptr;
int ev_code;
int spent_tx = 0;
int spent = 0;
int ev_code;
if (quota <= 0)
return spent;
@@ -3126,7 +3133,11 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
}
break;
case ESE_DZ_EV_CODE_TX_EV:
efx_ef10_handle_tx_event(channel, &event);
spent_tx += efx_ef10_handle_tx_event(channel, &event);
if (spent_tx >= EFX_NAPI_MAX_TX) {
spent = quota;
goto out;
}
break;
case ESE_DZ_EV_CODE_DRIVER_EV:
efx_ef10_handle_driver_event(channel, &event);


@@ -242,6 +242,8 @@ static void ef100_ev_read_ack(struct efx_channel *channel)
efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME));
}
#define EFX_NAPI_MAX_TX 512
static int ef100_ev_process(struct efx_channel *channel, int quota)
{
struct efx_nic *efx = channel->efx;
@@ -249,6 +251,7 @@ static int ef100_ev_process(struct efx_channel *channel, int quota)
bool evq_phase, old_evq_phase;
unsigned int read_ptr;
efx_qword_t *p_event;
int spent_tx = 0;
int spent = 0;
bool ev_phase;
int ev_type;
@@ -284,7 +287,9 @@ static int ef100_ev_process(struct efx_channel *channel, int quota)
efx_mcdi_process_event(channel, p_event);
break;
case ESE_GZ_EF100_EV_TX_COMPLETION:
ef100_ev_tx(channel, p_event);
spent_tx += ef100_ev_tx(channel, p_event);
if (spent_tx >= EFX_NAPI_MAX_TX)
spent = quota;
break;
case ESE_GZ_EF100_EV_DRIVER:
netif_info(efx, drv, efx->net_dev,


@@ -346,7 +346,7 @@ void ef100_tx_write(struct efx_tx_queue *tx_queue)
ef100_tx_push_buffers(tx_queue);
}
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
{
unsigned int tx_done =
EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
@@ -357,7 +357,7 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
tx_queue->ptr_mask;
efx_xmit_done(tx_queue, tx_index);
return efx_xmit_done(tx_queue, tx_index);
}
/* Add a socket buffer to a TX queue


@@ -20,7 +20,7 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue);
void ef100_tx_write(struct efx_tx_queue *tx_queue);
unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,


@@ -249,7 +249,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
}
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
@@ -279,6 +279,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
}
efx_xmit_done_check_empty(tx_queue);
return pkts_compl + efv_pkts_compl;
}
/* Remove buffers put into a tx_queue for the current packet.


@@ -28,7 +28,7 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
}
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count);


@@ -522,7 +522,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
{
struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
struct hwsim_edge_info *einfo;
struct hwsim_edge_info *einfo, *einfo_old;
struct hwsim_phy *phy_v0;
struct hwsim_edge *e;
u32 v0, v1;
@@ -560,8 +560,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
list_for_each_entry_rcu(e, &phy_v0->edges, list) {
if (e->endpoint->idx == v1) {
einfo->lqi = lqi;
rcu_assign_pointer(e->info, einfo);
einfo_old = rcu_replace_pointer(e->info, einfo,
lockdep_is_held(&hwsim_phys_lock));
rcu_read_unlock();
kfree_rcu(einfo_old, rcu);
mutex_unlock(&hwsim_phys_lock);
return 0;
}


@@ -905,7 +905,7 @@ static int dp83867_phy_reset(struct phy_device *phydev)
{
int err;
err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
if (err < 0)
return err;


@@ -547,6 +547,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name),


@@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root;
static void nfcsim_debugfs_init(void)
{
nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
if (!nfcsim_debugfs_root)
pr_err("Could not create debugfs entry\n");
}
static void nfcsim_debugfs_remove(void)


@@ -395,7 +395,16 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
if (ctrl->kas)
/*
* Completions of long-running commands should not be able to
* defer sending of periodic keep alives, since the controller
* may have completed processing such commands a long time ago
* (arbitrarily close to command submission time).
* req->deadline - req->timeout is the command submission time
* in jiffies.
*/
if (ctrl->kas &&
req->deadline - req->timeout >= ctrl->ka_last_check_time)
ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
@@ -1198,9 +1207,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
* The host should send Keep Alive commands at half of the Keep Alive Timeout
* accounting for transport roundtrip times [..].
*/
static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
{
unsigned long delay = ctrl->kato * HZ / 2;
/*
* When using Traffic Based Keep Alive, we need to run
* nvme_keep_alive_work at twice the normal frequency, as one
* command completion can postpone sending a keep alive command
* by up to twice the delay between runs.
*/
if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
delay /= 2;
return delay;
}
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
queue_delayed_work(nvme_wq, &ctrl->ka_work,
nvme_keep_alive_work_period(ctrl));
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
@@ -1209,6 +1234,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
bool startka = false;
unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
unsigned long delay = nvme_keep_alive_work_period(ctrl);
/*
* Subtract off the keepalive RTT so nvme_keep_alive_work runs
* at the desired frequency.
*/
if (rtt <= delay) {
delay -= rtt;
} else {
dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
jiffies_to_msecs(rtt));
delay = 0;
}
blk_mq_free_request(rq);
@@ -1219,6 +1258,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
return RQ_END_IO_NONE;
}
ctrl->ka_last_check_time = jiffies;
ctrl->comp_seen = false;
spin_lock_irqsave(&ctrl->lock, flags);
if (ctrl->state == NVME_CTRL_LIVE ||
@@ -1226,7 +1266,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
startka = true;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
nvme_queue_keep_alive_work(ctrl);
queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
return RQ_END_IO_NONE;
}
@@ -1237,6 +1277,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
bool comp_seen = ctrl->comp_seen;
struct request *rq;
ctrl->ka_last_check_time = jiffies;
if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
dev_dbg(ctrl->device,
"reschedule traffic based keep-alive timer\n");


@@ -323,6 +323,7 @@ struct nvme_ctrl {
struct delayed_work ka_work;
struct delayed_work failfast_work;
struct nvme_command ka_cmd;
unsigned long ka_last_check_time;
struct work_struct fw_act_work;
unsigned long events;


@@ -489,7 +489,10 @@ struct hv_pcibus_device {
struct fwnode_handle *fwnode;
/* Protocol version negotiated with the host */
enum pci_protocol_version_t protocol_version;
struct mutex state_lock;
enum hv_pcibus_state state;
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -553,19 +556,10 @@ struct hv_dr_state {
struct hv_pcidev_description func[];
};
enum hv_pcichild_state {
hv_pcichild_init = 0,
hv_pcichild_requirements,
hv_pcichild_resourced,
hv_pcichild_ejecting,
hv_pcichild_maximum
};
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
refcount_t refs;
enum hv_pcichild_state state;
struct pci_slot *pci_slot;
struct hv_pcidev_description desc;
bool reported_missing;
@@ -656,6 +650,11 @@ static void hv_arch_irq_unmask(struct irq_data *data)
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
int_desc = data->chip_data;
if (!int_desc) {
dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
__func__, data->irq);
return;
}
spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
@@ -1924,12 +1923,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
hv_pci_onchannelcallback(hbus);
spin_unlock_irqrestore(&channel->sched_lock, flags);
if (hpdev->state == hv_pcichild_ejecting) {
dev_err_once(&hbus->hdev->device,
"the device is being ejected\n");
goto enable_tasklet;
}
udelay(100);
}
@@ -2535,6 +2528,8 @@ static void pci_devices_present_work(struct work_struct *work)
if (!dr)
return;
mutex_lock(&hbus->state_lock);
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
list_for_each_entry(hpdev, &hbus->children, list_entry) {
@@ -2616,6 +2611,8 @@ static void pci_devices_present_work(struct work_struct *work)
break;
}
mutex_unlock(&hbus->state_lock);
kfree(dr);
}
@@ -2764,7 +2761,7 @@ static void hv_eject_device_work(struct work_struct *work)
hpdev = container_of(work, struct hv_pci_dev, wrk);
hbus = hpdev->hbus;
WARN_ON(hpdev->state != hv_pcichild_ejecting);
mutex_lock(&hbus->state_lock);
/*
* Ejection can come before or after the PCI bus has been set up, so
@@ -2802,6 +2799,8 @@ static void hv_eject_device_work(struct work_struct *work)
put_pcichild(hpdev);
put_pcichild(hpdev);
/* hpdev has been freed. Do not use it any more. */
mutex_unlock(&hbus->state_lock);
}
/**
@@ -2822,7 +2821,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
return;
}
hpdev->state = hv_pcichild_ejecting;
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
queue_work(hbus->wq, &hpdev->wrk);
@@ -3251,8 +3249,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
struct pci_bus_d0_entry *d0_entry;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
bool retry = true;
int ret;
enter_d0_retry:
/*
* Tell the host that the bus is ready to use, and moved into the
* powered-on state. This includes telling the host which region
@@ -3279,6 +3279,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
if (ret)
goto exit;
/*
* In certain case (Kdump) the pci device of interest was
* not cleanly shut down and resource is still held on host
* side, the host could return invalid device status.
* We need to explicitly request host to release the resource
* and try to enter D0 again.
*/
if (comp_pkt.completion_status < 0 && retry) {
retry = false;
dev_err(&hdev->device, "Retrying D0 Entry\n");
/*
* Hv_pci_bus_exit() calls hv_send_resource_released()
* to free up resources of its child devices.
* In the kdump kernel we need to set the
* wslot_res_allocated to 255 so it scans all child
* devices to release resources allocated in the
* normal kernel before panic happened.
*/
hbus->wslot_res_allocated = 255;
ret = hv_pci_bus_exit(hdev, true);
if (ret == 0) {
kfree(pkt);
goto enter_d0_retry;
}
dev_err(&hdev->device,
"Retrying D0 failed with ret %d\n", ret);
}
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -3321,6 +3353,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)
if (!ret)
ret = wait_for_response(hdev, &comp);
/*
* In the case of fast device addition/removal, it's possible that
* vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
* already got a PCI_BUS_RELATIONS* message from the host and the
* channel callback already scheduled a work to hbus->wq, which can be
* running pci_devices_present_work() -> survey_child_resources() ->
* complete(&hbus->survey_event), even after hv_pci_query_relations()
* exits and the stack variable 'comp' is no longer valid; as a result,
* a hang or a page fault may happen when the complete() calls
* raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
* hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
* -ENODEV, there can't be any more work item scheduled to hbus->wq
* after the flush_workqueue(): see vmbus_onoffer_rescind() ->
* vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
* channel->rescind = true.
*/
flush_workqueue(hbus->wq);
return ret;
}
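The long comment above describes a completion object on the caller's stack being signalled by deferred work after the caller may already have returned. A rough userspace analog of the race and of the fix, using POSIX threads rather than the kernel's workqueue API (all names here are hypothetical):

#include <pthread.h>
#include <stdio.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

/* Deferred work: signals a completion that lives on another stack. */
static void *channel_callback(void *arg)
{
        struct completion *comp = arg;

        pthread_mutex_lock(&comp->lock);
        comp->done = 1;
        pthread_cond_signal(&comp->cond);
        pthread_mutex_unlock(&comp->lock);
        return NULL;
}

static int query_relations(void)
{
        struct completion comp = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        pthread_t worker;

        pthread_create(&worker, NULL, channel_callback, &comp);

        /*
         * Returning early here would let 'comp' go out of scope while
         * the worker can still signal it. The join plays the role of
         * flush_workqueue(): drain the deferred work before unwinding.
         */
        pthread_join(worker, NULL);
        return comp.done ? 0 : -19;     /* -ENODEV */
}

int main(void)
{
        printf("query_relations() -> %d\n", query_relations());
        return 0;
}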
@@ -3506,7 +3556,6 @@ static int hv_pci_probe(struct hv_device *hdev,
struct hv_pcibus_device *hbus;
u16 dom_req, dom;
char *name;
bool enter_d0_retry = true;
int ret;
/*
@@ -3542,6 +3591,7 @@ static int hv_pci_probe(struct hv_device *hdev,
return -ENOMEM;
hbus->bridge = bridge;
mutex_init(&hbus->state_lock);
hbus->state = hv_pcibus_init;
hbus->wslot_res_allocated = -1;
@@ -3646,49 +3696,15 @@ static int hv_pci_probe(struct hv_device *hdev,
if (ret)
goto free_fwnode;
retry:
ret = hv_pci_query_relations(hdev);
if (ret)
goto free_irq_domain;
mutex_lock(&hbus->state_lock);
ret = hv_pci_enter_d0(hdev);
/*
* In certain case (Kdump) the pci device of interest was
* not cleanly shut down and resource is still held on host
* side, the host could return invalid device status.
* We need to explicitly request host to release the resource
* and try to enter D0 again.
* Since the hv_pci_bus_exit() call releases structures
* of all its child devices, we need to start the retry from
* hv_pci_query_relations() call, requesting host to send
* the synchronous child device relations message before this
* information is needed in hv_send_resources_allocated()
* call later.
*/
if (ret == -EPROTO && enter_d0_retry) {
enter_d0_retry = false;
dev_err(&hdev->device, "Retrying D0 Entry\n");
/*
* Hv_pci_bus_exit() calls hv_send_resources_released()
* to free up resources of its child devices.
* In the kdump kernel we need to set the
* wslot_res_allocated to 255 so it scans all child
* devices to release resources allocated in the
* normal kernel before panic happened.
*/
hbus->wslot_res_allocated = 255;
ret = hv_pci_bus_exit(hdev, true);
if (ret == 0)
goto retry;
dev_err(&hdev->device,
"Retrying D0 failed with ret %d\n", ret);
}
if (ret)
goto free_irq_domain;
goto release_state_lock;
ret = hv_pci_allocate_bridge_windows(hbus);
if (ret)
@@ -3706,12 +3722,15 @@ retry:
if (ret)
goto free_windows;
mutex_unlock(&hbus->state_lock);
return 0;
free_windows:
hv_pci_free_bridge_windows(hbus);
exit_d0:
(void) hv_pci_bus_exit(hdev, true);
release_state_lock:
mutex_unlock(&hbus->state_lock);
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
@@ -3965,20 +3984,26 @@ static int hv_pci_resume(struct hv_device *hdev)
if (ret)
goto out;
mutex_lock(&hbus->state_lock);
ret = hv_pci_enter_d0(hdev);
if (ret)
goto out;
goto release_state_lock;
ret = hv_send_resources_allocated(hdev);
if (ret)
goto out;
goto release_state_lock;
prepopulate_bars(hbus);
hv_pci_restore_msi_state(hbus);
hbus->state = hv_pcibus_installed;
mutex_unlock(&hbus->state_lock);
return 0;
release_state_lock:
mutex_unlock(&hbus->state_lock);
out:
vmbus_close(hdev->channel);
return ret;
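Taken together, the hv_pci_enter_d0() changes above implement a once-only retry: a bool guard plus a goto back to the entry label, with a cleanup step in between. A minimal standalone sketch of that shape, where both helpers are hypothetical stand-ins for the host round trip and the resource release:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: the first attempt fails, as in the kdump case. */
static int send_d0_entry(int attempt) { return attempt ? 0 : -71; /* -EPROTO */ }
static int release_stale_resources(void) { return 0; }

static int enter_d0(void)
{
        bool retry = true;
        int attempt = 0;
        int status;

enter_d0_retry:
        status = send_d0_entry(attempt++);
        if (status < 0 && retry) {
                retry = false;  /* loop back at most once */
                if (release_stale_resources() == 0)
                        goto enter_d0_retry;
        }
        return status;
}

int main(void)
{
        printf("enter_d0() -> %d\n", enter_d0());
        return 0;
}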


@@ -297,6 +297,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
/* Enable Static Slider */
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
amd_pmf_init_sps(dev);
dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
power_supply_reg_notifier(&dev->pwr_src_notifier);
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
@@ -315,8 +317,10 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_sps(dev);
}
if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
@@ -399,9 +403,6 @@ static int amd_pmf_probe(struct platform_device *pdev)
apmf_install_handler(dev);
amd_pmf_dbgfs_register(dev);
dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
power_supply_reg_notifier(&dev->pwr_src_notifier);
dev_info(dev->dev, "registered PMF device successfully\n");
return 0;
@@ -411,7 +412,6 @@ static int amd_pmf_remove(struct platform_device *pdev)
{
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_features(dev);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
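The net effect of these hunks is symmetry: the power-supply notifier is now registered under the same feature check that guards its unregistration, so init and deinit stay balanced. A toy sketch of the pairing (plain C, hypothetical names, not the PMF API):

#include <stdbool.h>
#include <stdio.h>

struct pmf_dev {
        bool slider_supported;
        bool notifier_registered;
};

static void init_features(struct pmf_dev *d)
{
        if (d->slider_supported) {
                d->notifier_registered = true;  /* registered with the feature */
                puts("notifier registered");
        }
}

static void deinit_features(struct pmf_dev *d)
{
        if (d->slider_supported) {
                d->notifier_registered = false; /* exact mirror of init */
                puts("notifier unregistered");
        }
}

int main(void)
{
        struct pmf_dev d = { .slider_supported = true, .notifier_registered = false };

        init_features(&d);
        deinit_features(&d);
        return 0;
}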


@@ -1367,6 +1367,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
enum io_sch_action {
IO_SCH_UNREG,
IO_SCH_ORPH_UNREG,
IO_SCH_UNREG_CDEV,
IO_SCH_ATTACH,
IO_SCH_UNREG_ATTACH,
IO_SCH_ORPH_ATTACH,
@@ -1399,7 +1400,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
}
if ((sch->schib.pmcw.pam & sch->opm) == 0) {
if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
return IO_SCH_UNREG;
return IO_SCH_UNREG_CDEV;
return IO_SCH_DISC;
}
if (device_is_disconnected(cdev))
@@ -1461,6 +1462,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
case IO_SCH_ORPH_ATTACH:
ccw_device_set_disconnected(cdev);
break;
case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
case IO_SCH_UNREG:
if (!cdev)
@@ -1494,6 +1496,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
if (rc)
goto out;
break;
case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
spin_lock_irqsave(sch->lock, flags);
sch_set_cdev(sch, NULL);


@@ -99,6 +99,13 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
},
.driver_data = (void *)intel_tgl_bios,
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_BOARD_NAME, "8709"),
},
.driver_data = (void *)intel_tgl_bios,
},
{
/* quirk used for NUC15 'Bishop County' LAPBC510 and LAPBC710 skews */
.matches = {


@@ -1090,8 +1090,10 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
}
sruntime = sdw_alloc_stream(dai->name);
if (!sruntime)
return -ENOMEM;
if (!sruntime) {
ret = -ENOMEM;
goto err_alloc;
}
ctrl->sruntime[dai->id] = sruntime;
@@ -1101,12 +1103,19 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
if (ret < 0 && ret != -ENOTSUPP) {
dev_err(dai->dev, "Failed to set sdw stream on %s\n",
codec_dai->name);
sdw_release_stream(sruntime);
return ret;
goto err_set_stream;
}
}
return 0;
err_set_stream:
sdw_release_stream(sruntime);
err_alloc:
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put_autosuspend(ctrl->dev);
return ret;
}
static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
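The qcom_swrm_startup() fix above replaces early returns with a single unwind path so the runtime-PM reference taken at entry is dropped on every failure. The underlying goto-unwind idiom, as a self-contained sketch with hypothetical helpers:

#include <stdio.h>

static int get_runtime(void) { return 0; }
static void put_runtime(void) { puts("runtime reference dropped"); }
static void *alloc_stream(void) { return NULL; }        /* simulate failure */
static void free_stream(void *s) { (void)s; }
static int set_stream(void *s) { (void)s; return 0; }

static int startup(void)
{
        void *stream;
        int ret = get_runtime();

        if (ret)
                return ret;

        stream = alloc_stream();
        if (!stream) {
                ret = -12;      /* -ENOMEM */
                goto err_alloc;
        }

        ret = set_stream(stream);
        if (ret < 0)
                goto err_set_stream;

        return 0;

err_set_stream:
        free_stream(stream);
err_alloc:
        put_runtime();  /* every failure path balances the get */
        return ret;
}

int main(void)
{
        printf("startup() -> %d\n", startup());
        return 0;
}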


@@ -923,8 +923,10 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
else
/* disable LPSPI module IRQ when enable DMA mode successfully,
* to prevent the unexpected LPSPI module IRQ events*/
/*
* Disable the LPSPI module IRQ when DMA mode is enabled
* successfully, to prevent unexpected LPSPI module IRQ events.
*/
disable_irq(irq);
ret = devm_spi_register_controller(&pdev->dev, controller);


@@ -595,6 +595,8 @@ static int spi_geni_init(struct spi_geni_master *mas)
geni_se_select_mode(se, GENI_GPI_DMA);
dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
break;
} else if (ret == -EPROBE_DEFER) {
goto out_pm;
}
/*
* in case of failure to get dma channel, we can still do the


@@ -1128,6 +1128,7 @@ int iscsi_target_locate_portal(
iscsi_target_set_sock_callbacks(conn);
login->np = np;
conn->tpg = NULL;
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
@@ -1195,7 +1196,6 @@ int iscsi_target_locate_portal(
*/
sessiontype = strncmp(s_buf, DISCOVERY, 9);
if (!sessiontype) {
conn->tpg = iscsit_global->discovery_tpg;
if (!login->leading_connection)
goto get_target;
@@ -1212,9 +1212,11 @@
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
*/
conn->tpg = iscsit_global->discovery_tpg;
if (iscsit_access_np(np, conn->tpg) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
conn->tpg = NULL;
ret = -1;
goto out;
}
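The relocated assignment above means conn->tpg is only published once iscsit_access_np() has succeeded, and is reset to NULL when it fails. A hypothetical sketch of that publish-late, roll-back-on-failure pattern:

#include <stdio.h>

struct tpg { int access_allowed; };
struct conn { struct tpg *tpg; };

static int access_np(struct tpg *t)
{
        return t->access_allowed ? 0 : -1;
}

static int locate_portal(struct conn *c, struct tpg *discovery_tpg)
{
        c->tpg = discovery_tpg;
        if (access_np(c->tpg) < 0) {
                c->tpg = NULL;  /* roll back the published pointer */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct tpg discovery = { .access_allowed = 0 };
        struct conn c = { 0 };

        printf("locate_portal() -> %d, tpg = %p\n",
               locate_portal(&c, &discovery), (void *)c.tpg);
        return 0;
}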


@@ -346,6 +346,7 @@ static struct lpuart_soc_data imxrt1050_data = {
.devtype = IMXRT1050_LPUART,
.iotype = UPIO_MEM32,
.reg_off = IMX_REG_OFF,
.rx_watermark = 1,
};
static const struct of_device_id lpuart_dt_ids[] = {


@@ -170,6 +170,9 @@ static int udc_pci_probe(
retval = -ENODEV;
goto err_probe;
}
udc = dev;
return 0;
err_probe:


@@ -934,13 +934,18 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
if (zcopy_used) {
if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
if (retry)
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
else
vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
if (retry) {
vhost_discard_vq_desc(vq, 1);
vhost_net_enable_vq(net, vq);
break;
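This hunk computes whether a send error is retryable exactly once and reuses the answer in two places: rewinding the zerocopy index and requeuing the descriptor. The classification on its own, as a trivial sketch (errno values only, not the vhost API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool retryable(int err)
{
        return err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
}

int main(void)
{
        int err = -ENOBUFS;

        if (retryable(err))
                puts("rewind the index and retry the descriptor");
        else
                puts("mark the buffer done and move on");
        return 0;
}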


@@ -377,7 +377,10 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_dev *d = &v->vdev;
u64 actual_features;
u64 features;
int i;
/*
* It's not allowed to change the features after they have
@@ -392,6 +395,16 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (vdpa_set_features(vdpa, features))
return -EINVAL;
/* let the vqs know what has been configured */
actual_features = ops->get_driver_features(vdpa);
for (i = 0; i < d->nvqs; ++i) {
struct vhost_virtqueue *vq = d->vqs[i];
mutex_lock(&vq->mutex);
vq->acked_features = actual_features;
mutex_unlock(&vq->mutex);
}
return 0;
}
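After the device-level negotiation succeeds, the loop above pushes the negotiated features into every virtqueue under that queue's own lock, so the datapath never reads a half-updated value. A userspace sketch of the propagation, using illustrative types rather than the vhost structures:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NVQS 2

struct vq {
        pthread_mutex_t mutex;
        uint64_t acked_features;
};

struct vdpa_dev {
        struct vq vqs[NVQS];
        uint64_t driver_features;
};

static void set_features(struct vdpa_dev *d, uint64_t features)
{
        d->driver_features = features;  /* device-level negotiation */

        /* let the vqs know what has been configured */
        for (int i = 0; i < NVQS; i++) {
                pthread_mutex_lock(&d->vqs[i].mutex);
                d->vqs[i].acked_features = d->driver_features;
                pthread_mutex_unlock(&d->vqs[i].mutex);
        }
}

int main(void)
{
        struct vdpa_dev d = { .vqs = {
                { PTHREAD_MUTEX_INITIALIZER, 0 },
                { PTHREAD_MUTEX_INITIALIZER, 0 },
        } };

        set_features(&d, 0x3);
        printf("vq0 features: %#llx\n",
               (unsigned long long)d.vqs[0].acked_features);
        return 0;
}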


@@ -371,14 +371,7 @@ config NFS_V4_2_SSC_HELPER
source "net/sunrpc/Kconfig"
source "fs/ceph/Kconfig"
source "fs/cifs/Kconfig"
source "fs/ksmbd/Kconfig"
config SMBFS_COMMON
tristate
default y if CIFS=y || SMB_SERVER=y
default m if CIFS=m || SMB_SERVER=m
source "fs/smb/Kconfig"
source "fs/coda/Kconfig"
source "fs/afs/Kconfig"
source "fs/9p/Kconfig"


@@ -94,9 +94,7 @@ obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
obj-y += unicode/
obj-$(CONFIG_SYSV_FS) += sysv/
obj-$(CONFIG_SMBFS_COMMON) += smbfs_common/
obj-$(CONFIG_CIFS) += cifs/
obj-$(CONFIG_SMB_SERVER) += ksmbd/
obj-$(CONFIG_SMBFS) += smb/
obj-$(CONFIG_HPFS_FS) += hpfs/
obj-$(CONFIG_NTFS_FS) += ntfs/
obj-$(CONFIG_NTFS3_FS) += ntfs3/


@@ -6205,7 +6205,7 @@ static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans,
{
struct btrfs_root *log = inode->root->log_root;
const struct btrfs_delayed_item *curr;
u64 last_range_start;
u64 last_range_start = 0;
u64 last_range_end = 0;
struct btrfs_key key;


@@ -783,9 +783,13 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
if (!user_backed_iter(i))
return false;
/*
* Try to fault in multiple pages initially. When that doesn't result
* in any progress, fall back to a single page.
*/
size = PAGE_SIZE;
offs = offset_in_page(iocb->ki_pos);
if (*prev_count != count || !*window_size) {
if (*prev_count != count) {
size_t nr_dirtied;
nr_dirtied = max(current->nr_dirtied_pause -
@@ -869,6 +873,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
struct gfs2_inode *ip = GFS2_I(inode);
size_t prev_count = 0, window_size = 0;
size_t written = 0;
bool enough_retries;
ssize_t ret;
/*
@@ -912,11 +917,17 @@ retry:
if (ret > 0)
written = ret;
enough_retries = prev_count == iov_iter_count(from) &&
window_size <= PAGE_SIZE;
if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
gfs2_glock_dq(gh);
window_size -= fault_in_iov_iter_readable(from, window_size);
if (window_size)
goto retry;
if (window_size) {
if (!enough_retries)
goto retry;
/* fall back to buffered I/O */
ret = 0;
}
}
out_unlock:
if (gfs2_holder_queued(gh))
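The enough_retries test above breaks the fault-in/retry loop once the request size has stopped shrinking and the window is already down to one page, falling back to buffered I/O instead of spinning. The condition in isolation, as a runnable sketch (PAGE_SIZE assumed to be 4096):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static bool enough_retries(size_t prev_count, size_t count, size_t window_size)
{
        /* no progress since the last pass and already at the minimum window */
        return prev_count == count && window_size <= PAGE_SIZE;
}

int main(void)
{
        if (enough_retries(8192, 8192, PAGE_SIZE))
                puts("fall back to buffered I/O");
        else
                puts("retry the direct write");
        return 0;
}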


@@ -369,7 +369,15 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
struct page *page = pvec.pages[i];
lock_page(page);
nilfs_clear_dirty_page(page, silent);
/*
* This page may have been removed from the address
* space by truncation or invalidation when the lock
* was acquired. Skip processing in that case.
*/
if (likely(page->mapping == mapping))
nilfs_clear_dirty_page(page, silent);
unlock_page(page);
}
pagevec_release(&pvec);
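The guard added above is the classic revalidate-after-lock pattern: a condition checked before sleeping on a lock can be stale by the time the lock is held, so it is rechecked afterwards. A pthread analog with hypothetical types (not the pagecache API):

#include <pthread.h>
#include <stdio.h>

struct page_like {
        pthread_mutex_t lock;
        void *mapping;  /* becomes NULL once "truncated" */
};

static void clear_dirty(struct page_like *p, void *expected_mapping)
{
        pthread_mutex_lock(&p->lock);
        /* The mapping may have been torn down while we waited. */
        if (p->mapping == expected_mapping)
                puts("clearing dirty state");
        else
                puts("skipped: page left the mapping");
        pthread_mutex_unlock(&p->lock);
}

int main(void)
{
        struct page_like p = { PTHREAD_MUTEX_INITIALIZER, NULL };

        clear_dirty(&p, (void *)&p);    /* mapping already gone: skipped */
        return 0;
}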


@@ -101,6 +101,12 @@ int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
if (unlikely(!bh))
return -ENOMEM;
lock_buffer(bh);
if (!buffer_uptodate(bh)) {
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
nilfs_segbuf_add_segsum_buffer(segbuf, bh);
return 0;
}
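The hunk above zero-fills a segment-summary buffer that is not yet uptodate before it is used, so stale device blocks cannot leak into the on-disk summary. The guard in plain C, with a toy buffer type:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct buf {
        bool uptodate;
        unsigned char data[16];
};

static void init_if_needed(struct buf *b)
{
        if (!b->uptodate) {     /* only freshly created buffers */
                memset(b->data, 0, sizeof(b->data));
                b->uptodate = true;
        }
}

int main(void)
{
        struct buf b = { .uptodate = false, .data = { 0xde, 0xad } };

        init_if_needed(&b);
        printf("data[0] = %d, uptodate = %d\n", b.data[0], b.uptodate);
        return 0;
}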


@@ -979,10 +979,13 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
unsigned int isz, srsz;
bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
lock_buffer(bh_sr);
raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
isz = nilfs->ns_inode_size;
srsz = NILFS_SR_BYTES(isz);
raw_sr->sr_sum = 0; /* Ensure initialization within this update */
raw_sr->sr_bytes = cpu_to_le16(srsz);
raw_sr->sr_nongc_ctime
= cpu_to_le64(nilfs_doing_gc() ?
@@ -996,6 +999,8 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
NILFS_SR_SUFILE_OFFSET(isz), 1);
memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
set_buffer_uptodate(bh_sr);
unlock_buffer(bh_sr);
}
static void nilfs_redirty_inodes(struct list_head *head)
@@ -1778,6 +1783,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
list_for_each_entry(segbuf, logs, sb_list) {
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
clear_buffer_uptodate(bh);
if (bh->b_page != bd_page) {
if (bd_page)
end_page_writeback(bd_page);
@@ -1789,6 +1795,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
b_assoc_buffers) {
clear_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
clear_buffer_uptodate(bh);
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;

Some files were not shown because too many files have changed in this diff Show More