This is the 4.14.181 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl7Ey28ACgkQONu9yGCS
 aT5HHBAApSN5pEsCeogd9V1h20Gsx9TteDrn1qVqIxa4k7FczL7TuhGZw3SH9JjI
 oK7xvZK8JknVvr+GSnk+OO7oU64L3qtZ+icfrdqVecBIsxiyu3gopmazjLP+QxEo
 x+9xqR9clqTjOgQx3S8rH9s09fgsZMNAp1Ga8juyGZWxFkPoLiSyB+SDEIFwL43v
 IYeC2uJc5lnv8+vNGAcEHAJiphxKeWJLd/etmelIaFrp+kkmO0nIoszR9uLNkr8i
 yuCqt2tCSd3vVaQqjSOpg/3u1PnQpmMqvKqWXuDKBOkr9nz7cgOf+6uWeuo3Fvro
 Ji8q0Dtay1xNJLgwCGH3c98OsiRE5OMX0dIpadcDCteFwJOSryu7tkf5ODp7BA+Q
 EjZx5DIhvNa/7auqarqMJvblconocZnJ+8zcN2aGL8Yn57Q0bsfyiHyB6bMW98+/
 J0dMSuXl0c9MPLKa28+31hrmeThs5kG15EpTUzBrkXcTbsLGxPoJVC4IFIACwqlg
 lyhokwuZ87slEZfnz91R3V2Ehdyl5d8ci2/DBzzZiPjgGsUoxWH1pwmb5WO2agNf
 K9l9VVsGCAl+gqY41kI9UCf3BNzv/sc2uScjlnOIjpGrNI4IVc/bGq1y1ktIY4UC
 WV3Qux5GvwHbS/Dbrapv7B5Tt9EtbLmAPbnCCJ93e1mXXEkUw3o=
 =6R5g
 -----END PGP SIGNATURE-----

Merge tag 'v4.14.181' into v4.14/base

This is the 4.14.181 stable release
Bruce Ashfield 2020-05-28 14:37:00 -04:00
commit 8946be5259
128 changed files with 892 additions and 443 deletions


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 180
+SUBLEVEL = 181
 EXTRAVERSION =
 NAME = Petit Gorille
@@ -656,20 +656,14 @@ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
-else
-ifdef CONFIG_PROFILE_ALL_BRANCHES
-KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os
 else
 KBUILD_CFLAGS += -O2
 endif
-endif
-
-KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
-			$(call cc-disable-warning,maybe-uninitialized,))
 
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races)
 
 # check for 'asm goto'
 ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
@@ -807,6 +801,17 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable stringop warnings in gcc 8+
 KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
 
+# We'll want to enable this eventually, but it's not going away for 5.7 at least
+KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
+
+# Another good warning that we'll want to enable eventually
+KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
+
+# Enabled with W=2, disabled by default as noisy
+KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)


@@ -289,6 +289,7 @@
         #address-cells = <1>;
         ranges = <0x51000000 0x51000000 0x3000
                   0x0        0x20000000 0x10000000>;
+        dma-ranges;
         /**
          * To enable PCI endpoint mode, disable the pcie1_rc
          * node and enable pcie1_ep mode.
@@ -303,7 +304,6 @@
             device_type = "pci";
             ranges = <0x81000000 0 0          0x03000 0 0x00010000
                       0x82000000 0 0x20013000 0x13000 0 0xffed000>;
-            dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
             bus-range = <0x00 0xff>;
             #interrupt-cells = <1>;
             num-lanes = <1>;
@@ -347,6 +347,7 @@
         #address-cells = <1>;
         ranges = <0x51800000 0x51800000 0x3000
                   0x0        0x30000000 0x10000000>;
+        dma-ranges;
         status = "disabled";
         pcie@51800000 {
             compatible = "ti,dra7-pcie";
@@ -358,7 +359,6 @@
             device_type = "pci";
             ranges = <0x81000000 0 0          0x03000 0 0x00010000
                       0x82000000 0 0x30013000 0x13000 0 0xffed000>;
-            dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
             bus-range = <0x00 0xff>;
             #interrupt-cells = <1>;
             num-lanes = <1>;


@@ -81,8 +81,8 @@
 	imx27-phycard-s-rdk {
 		pinctrl_i2c1: i2c1grp {
 			fsl,pins = <
-				MX27_PAD_I2C2_SDA__I2C2_SDA 0x0
-				MX27_PAD_I2C2_SCL__I2C2_SCL 0x0
+				MX27_PAD_I2C_DATA__I2C_DATA 0x0
+				MX27_PAD_I2C_CLK__I2C_CLK 0x0
 			>;
 		};


@@ -133,7 +133,14 @@
 	cmt1: timer@e6130000 {
 		compatible = "renesas,cmt-48-r8a73a4", "renesas,cmt-48-gen2";
 		reg = <0 0xe6130000 0 0x1004>;
-		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
 		clock-names = "fck";
 		power-domains = <&pd_c5>;


@@ -467,7 +467,7 @@
 	cpg_clocks: cpg_clocks@e6150000 {
 		compatible = "renesas,r8a7740-cpg-clocks";
 		reg = <0xe6150000 0x10000>;
-		clocks = <&extal1_clk>, <&extalr_clk>;
+		clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>;
 		#clock-cells = <1>;
 		clock-output-names = "system", "pllc0", "pllc1",
 				     "pllc2", "r",


@@ -91,7 +91,7 @@
 &i2c1 {
 	status = "okay";
 
-	rk805: rk805@18 {
+	rk805: pmic@18 {
 		compatible = "rockchip,rk805";
 		reg = <0x18>;
 		interrupt-parent = <&gpio2>;


@@ -149,7 +149,7 @@
 &i2c1 {
 	status = "okay";
 
-	rk805: rk805@18 {
+	rk805: pmic@18 {
 		compatible = "rockchip,rk805";
 		reg = <0x18>;
 		interrupt-parent = <&gpio2>;


@@ -402,7 +402,7 @@
 			 "bus_clk", "grf_clk";
 		status = "disabled";
 
-		usbdrd_dwc3_0: dwc3 {
+		usbdrd_dwc3_0: usb@fe800000 {
 			compatible = "snps,dwc3";
 			reg = <0x0 0xfe800000 0x0 0x100000>;
 			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
@@ -430,7 +430,7 @@
 			 "bus_clk", "grf_clk";
 		status = "disabled";
 
-		usbdrd_dwc3_1: dwc3 {
+		usbdrd_dwc3_1: usb@fe900000 {
 			compatible = "snps,dwc3";
 			reg = <0x0 0xfe900000 0x0 0x100000>;
 			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;


@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS	21*8
 
 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
-	/*
-	 * Push registers and sanitize registers of values that a
-	 * speculation attack might otherwise want to exploit. The
-	 * lower registers are likely clobbered well before they
-	 * could be put to use in a speculative execution gadget.
-	 * Interleave XOR with PUSH for better uop scheduling:
-	 */
 	.if \save_ret
 	pushq	%rsi		/* pt_regs->si */
 	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
@@ -117,29 +110,40 @@ For 32-bit we have the following conventions - kernel is built with
 	pushq	%rcx		/* pt_regs->cx */
 	pushq	\rax		/* pt_regs->ax */
 	pushq	%r8		/* pt_regs->r8 */
-	xorl	%r8d, %r8d	/* nospec r8 */
 	pushq	%r9		/* pt_regs->r9 */
-	xorl	%r9d, %r9d	/* nospec r9 */
 	pushq	%r10		/* pt_regs->r10 */
-	xorl	%r10d, %r10d	/* nospec r10 */
 	pushq	%r11		/* pt_regs->r11 */
-	xorl	%r11d, %r11d	/* nospec r11*/
 	pushq	%rbx		/* pt_regs->rbx */
-	xorl	%ebx, %ebx	/* nospec rbx*/
 	pushq	%rbp		/* pt_regs->rbp */
-	xorl	%ebp, %ebp	/* nospec rbp*/
 	pushq	%r12		/* pt_regs->r12 */
-	xorl	%r12d, %r12d	/* nospec r12*/
 	pushq	%r13		/* pt_regs->r13 */
-	xorl	%r13d, %r13d	/* nospec r13*/
 	pushq	%r14		/* pt_regs->r14 */
-	xorl	%r14d, %r14d	/* nospec r14*/
 	pushq	%r15		/* pt_regs->r15 */
-	xorl	%r15d, %r15d	/* nospec r15*/
 	UNWIND_HINT_REGS
+
 	.if \save_ret
 	pushq	%rsi		/* return address on top of stack */
 	.endif
+
+	/*
+	 * Sanitize registers of values that a speculation attack might
+	 * otherwise want to exploit. The lower registers are likely clobbered
+	 * well before they could be put to use in a speculative execution
+	 * gadget.
+	 */
+	xorl	%edx,  %edx	/* nospec dx  */
+	xorl	%ecx,  %ecx	/* nospec cx  */
+	xorl	%r8d,  %r8d	/* nospec r8  */
+	xorl	%r9d,  %r9d	/* nospec r9  */
+	xorl	%r10d, %r10d	/* nospec r10 */
+	xorl	%r11d, %r11d	/* nospec r11 */
+	xorl	%ebx,  %ebx	/* nospec rbx */
+	xorl	%ebp,  %ebp	/* nospec rbp */
+	xorl	%r12d, %r12d	/* nospec r12 */
+	xorl	%r13d, %r13d	/* nospec r13 */
+	xorl	%r14d, %r14d	/* nospec r14 */
+	xorl	%r15d, %r15d	/* nospec r15 */
+
 .endm
 
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
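Note: the PUSH_AND_CLEAR_REGS rework above stops interleaving XOR with PUSH and clears everything after the pushes instead. As a hedged illustration (not code from this tree), the clearing idiom looks like this as GCC inline asm; xorl on the 32-bit alias zero-extends, so it clears the whole 64-bit register with a shorter encoding than xorq:

static inline void clear_scratch_regs(void)
{
	asm volatile("xorl %%r8d,  %%r8d\n\t"	/* nospec r8  */
		     "xorl %%r9d,  %%r9d\n\t"	/* nospec r9  */
		     "xorl %%r10d, %%r10d\n\t"	/* nospec r10 */
		     "xorl %%r11d, %%r11d"	/* nospec r11 */
		     ::: "r8", "r9", "r10", "r11");
}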


@@ -302,7 +302,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 	 */
 syscall_return_via_sysret:
 	/* rcx and r11 are already restored (see code above) */
-	UNWIND_HINT_EMPTY
 	POP_REGS pop_rdi=0 skip_r11rcx=1
 
 	/*
@@ -311,6 +310,7 @@ syscall_return_via_sysret:
 	 */
 	movq	%rsp, %rdi
 	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+	UNWIND_HINT_EMPTY
 
 	pushq	RSP-RDI(%rdi)	/* RSP */
 	pushq	(%rdi)		/* RDI */
@@ -606,6 +606,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 	 */
 	movq	%rsp, %rdi
 	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+	UNWIND_HINT_EMPTY
 
 	/* Copy the IRET frame to the trampoline stack. */
 	pushq	6*8(%rdi)	/* SS */
@@ -1648,7 +1649,7 @@ ENTRY(rewind_stack_do_exit)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
 	leaq	-PTREGS_SIZE(%rax), %rsp
-	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
+	UNWIND_HINT_REGS
 
 	call	do_exit
 END(rewind_stack_do_exit)


@@ -78,7 +78,7 @@ set_bit(long nr, volatile unsigned long *addr)
 			: "iq" ((u8)CONST_MASK(nr))
 			: "memory");
 	} else {
-		asm volatile(LOCK_PREFIX "bts %1,%0"
+		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
 			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
 	}
 }
@@ -94,7 +94,7 @@ set_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
 
 /**
@@ -115,7 +115,7 @@ clear_bit(long nr, volatile unsigned long *addr)
 			: CONST_MASK_ADDR(nr, addr)
 			: "iq" ((u8)~CONST_MASK(nr)));
 	} else {
-		asm volatile(LOCK_PREFIX "btr %1,%0"
+		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
 			: BITOP_ADDR(addr)
 			: "Ir" (nr));
 	}
@@ -137,7 +137,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
 }
 
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -182,7 +182,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
 }
 
 /**
@@ -201,7 +201,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 			: CONST_MASK_ADDR(nr, addr)
 			: "iq" ((u8)CONST_MASK(nr)));
 	} else {
-		asm volatile(LOCK_PREFIX "btc %1,%0"
+		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
 			: BITOP_ADDR(addr)
 			: "Ir" (nr));
 	}
@@ -217,7 +217,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
+	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
+			 *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -246,7 +247,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 {
 	bool oldbit;
 
-	asm("bts %2,%1"
+	asm(__ASM_SIZE(bts) " %2,%1"
 	    CC_SET(c)
 	    : CC_OUT(c) (oldbit), ADDR
 	    : "Ir" (nr));
@@ -263,7 +264,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
  */
 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
+	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
+			 *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -286,7 +288,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 {
 	bool oldbit;
 
-	asm volatile("btr %2,%1"
+	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
@@ -298,7 +300,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 {
 	bool oldbit;
 
-	asm volatile("btc %2,%1"
+	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");
@@ -316,7 +318,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
  */
 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
+	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
+			 *addr, "Ir", nr, "%0", c);
 }
 
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
@@ -329,7 +332,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 {
 	bool oldbit;
 
-	asm volatile("bt %2,%1"
+	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));


@@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr,
 {
 	bool oldbit;
 
-	asm volatile("bt "__percpu_arg(2)",%1"
+	asm volatile("btl "__percpu_arg(2)",%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));


@@ -55,8 +55,13 @@
 /*
  * Initialize the stackprotector canary value.
  *
- * NOTE: this must only be called from functions that never return,
+ * NOTE: this must only be called from functions that never return
  * and it must always be inlined.
+ *
+ * In addition, it should be called from a compilation unit for which
+ * stack protector is disabled. Alternatively, the caller should not end
+ * with a function call which gets tail-call optimized as that would
+ * lead to checking a modified canary value.
  */
 static __always_inline void boot_init_stack_canary(void)
 {


@@ -270,6 +270,14 @@ static void notrace start_secondary(void *unused)
 	wmb();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+
+	/*
+	 * Prevent tail call to cpu_startup_entry() because the stack protector
+	 * guard has been changed a couple of function calls up, in
+	 * boot_init_stack_canary() and must not be checked before tail calling
+	 * another function.
+	 */
+	prevent_tail_call_optimization();
 }
 
 /**
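Note: this hunk, the Xen cpu_bringup_and_idle() hunk further down, and the stackprotector.h comment change are one fix. boot_init_stack_canary() rewrites the per-CPU guard a few calls up, so the bringup function's own epilogue canary check must never run against the old frame copy, which is exactly what a tail-call-optimized final call causes. A standalone sketch of the failure mode, with illustrative names (the real guard lives in per-CPU storage, and prevent_tail_call_optimization() is assumed here to be an empty-asm/barrier style construct):

extern unsigned long __stack_chk_guard;	/* reference guard checked in epilogues */

static void next_stage(void);		/* never returns in the real kernel */

static void bringup(void)
{
	/* the prologue saved a copy of the *old* guard into this frame */
	__stack_chk_guard = 0xdeadbeefUL;	/* what boot_init_stack_canary() does */

	next_stage();
	/*
	 * Without a statement after the call, the compiler may emit
	 * "jmp next_stage"; bringup()'s epilogue then runs first and
	 * compares the stale frame copy against the new guard, tripping
	 * __stack_chk_fail(). The empty asm forces a real call.
	 */
	asm("");	/* stand-in for prevent_tail_call_optimization() */
}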


@@ -90,9 +90,6 @@ static struct orc_entry null_orc_entry = {
 static struct orc_entry *orc_find(unsigned long ip)
 {
-	if (!orc_init)
-		return NULL;
-
 	if (ip == 0)
 		return &null_orc_entry;
@@ -460,7 +457,7 @@ bool unwind_next_frame(struct unwind_state *state)
 	default:
 		orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
 			 orc->type, (void *)orig_ip);
-		break;
+		goto done;
 	}
 
 	/* Find BP: */
@@ -511,17 +508,20 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	memset(state, 0, sizeof(*state));
 	state->task = task;
 
+	if (!orc_init)
+		goto err;
+
 	/*
 	 * Refuse to unwind the stack of a task while it's executing on another
 	 * CPU. This check is racy, but that's ok: the unwinder has other
 	 * checks to prevent it from going off the rails.
 	 */
 	if (task_on_another_cpu(task))
-		goto done;
+		goto err;
 
 	if (regs) {
 		if (user_mode(regs))
-			goto done;
+			goto the_end;
 
 		state->ip = regs->ip;
 		state->sp = kernel_stack_pointer(regs);
@@ -554,6 +554,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 		 * generate some kind of backtrace if this happens.
 		 */
 		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+		state->error = true;
 		if (get_stack_info(next_page, state->task, &state->stack_info,
 				   &state->stack_mask))
 			return;
@@ -574,13 +575,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	/* Otherwise, skip ahead to the user-specified starting frame: */
 	while (!unwind_done(state) &&
 	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
-			state->sp <= (unsigned long)first_frame))
+			state->sp < (unsigned long)first_frame))
 		unwind_next_frame(state);
 
 	return;
 
-done:
+err:
+	state->error = true;
+the_end:
 	state->stack_info.type = STACK_TYPE_UNKNOWN;
-	return;
 }
 EXPORT_SYMBOL_GPL(__unwind_start);


@@ -3214,7 +3214,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
 	unsigned bank_num = mcg_cap & 0xff, bank;
 
 	r = -EINVAL;
-	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+	if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
 		goto out;
 	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
 		goto out;


@@ -89,6 +89,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+	prevent_tail_call_optimization();
 }
 
 void xen_smp_intr_free_pv(unsigned int cpu)


@@ -531,7 +531,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
 	crypto_free_skcipher(ctx->child);
 }
 
-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
 {
 	crypto_drop_skcipher(skcipher_instance_ctx(inst));
 	kfree(inst);
@@ -642,7 +642,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.encrypt = encrypt;
 	inst->alg.decrypt = decrypt;
 
-	inst->free = free;
+	inst->free = free_inst;
 
 	err = skcipher_register_instance(tmpl, inst);
 	if (err)


@@ -469,7 +469,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
 	crypto_free_cipher(ctx->tweak);
 }
 
-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
 {
 	crypto_drop_skcipher(skcipher_instance_ctx(inst));
 	kfree(inst);
@@ -580,7 +580,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.encrypt = encrypt;
 	inst->alg.decrypt = decrypt;
 
-	inst->free = free;
+	inst->free = free_inst;
 
 	err = skcipher_register_instance(tmpl, inst);
 	if (err)


@@ -31,6 +31,15 @@ struct virtio_blk_vq {
 } ____cacheline_aligned_in_smp;
 
 struct virtio_blk {
+	/*
+	 * This mutex must be held by anything that may run after
+	 * virtblk_remove() sets vblk->vdev to NULL.
+	 *
+	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
+	 * shut down before vblk->vdev is set to NULL and therefore do not need
+	 * to hold this mutex.
+	 */
+	struct mutex vdev_mutex;
 	struct virtio_device *vdev;
 
 	/* The disk structure for the kernel. */
@@ -42,6 +51,13 @@ struct virtio_blk {
 	/* Process context for config space updates */
 	struct work_struct config_work;
 
+	/*
+	 * Tracks references from block_device_operations open/release and
+	 * virtio_driver probe/remove so this object can be freed once no
+	 * longer in use.
+	 */
+	refcount_t refs;
+
 	/* What host tells us, plus 2 for header & tailer. */
 	unsigned int sg_elems;
@@ -315,10 +331,55 @@ out:
 	return err;
 }
 
+static void virtblk_get(struct virtio_blk *vblk)
+{
+	refcount_inc(&vblk->refs);
+}
+
+static void virtblk_put(struct virtio_blk *vblk)
+{
+	if (refcount_dec_and_test(&vblk->refs)) {
+		ida_simple_remove(&vd_index_ida, vblk->index);
+		mutex_destroy(&vblk->vdev_mutex);
+		kfree(vblk);
+	}
+}
+
+static int virtblk_open(struct block_device *bd, fmode_t mode)
+{
+	struct virtio_blk *vblk = bd->bd_disk->private_data;
+	int ret = 0;
+
+	mutex_lock(&vblk->vdev_mutex);
+
+	if (vblk->vdev)
+		virtblk_get(vblk);
+	else
+		ret = -ENXIO;
+
+	mutex_unlock(&vblk->vdev_mutex);
+	return ret;
+}
+
+static void virtblk_release(struct gendisk *disk, fmode_t mode)
+{
+	struct virtio_blk *vblk = disk->private_data;
+
+	virtblk_put(vblk);
+}
+
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
 	struct virtio_blk *vblk = bd->bd_disk->private_data;
+	int ret = 0;
+
+	mutex_lock(&vblk->vdev_mutex);
+
+	if (!vblk->vdev) {
+		ret = -ENXIO;
+		goto out;
+	}
 
 	/* see if the host passed in geometry config */
 	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
@@ -334,12 +395,16 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 		geo->sectors = 1 << 5;
 		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
 	}
-	return 0;
+out:
+	mutex_unlock(&vblk->vdev_mutex);
+	return ret;
 }
 
 static const struct block_device_operations virtblk_fops = {
 	.ioctl = virtblk_ioctl,
 	.owner = THIS_MODULE,
+	.open = virtblk_open,
+	.release = virtblk_release,
 	.getgeo = virtblk_getgeo,
 };
@@ -659,6 +724,10 @@ static int virtblk_probe(struct virtio_device *vdev)
 		goto out_free_index;
 	}
 
+	/* This reference is dropped in virtblk_remove(). */
+	refcount_set(&vblk->refs, 1);
+	mutex_init(&vblk->vdev_mutex);
+
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
@@ -821,8 +890,6 @@ out:
 static void virtblk_remove(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;
-	int index = vblk->index;
-	int refc;
 
 	/* Make sure no work handler is accessing the device. */
 	flush_work(&vblk->config_work);
@@ -832,18 +899,21 @@ static void virtblk_remove(struct virtio_device *vdev)
 	blk_mq_free_tag_set(&vblk->tag_set);
 
+	mutex_lock(&vblk->vdev_mutex);
+
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
-	refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
+	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
+	vblk->vdev = NULL;
+
 	put_disk(vblk->disk);
 	vdev->config->del_vqs(vdev);
 	kfree(vblk->vqs);
-	kfree(vblk);
 
-	/* Only free device id if we don't have any users */
-	if (refc == 1)
-		ida_simple_remove(&vd_index_ida, index);
+	mutex_unlock(&vblk->vdev_mutex);
+
+	virtblk_put(vblk);
 }
 
 #ifdef CONFIG_PM_SLEEP
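Note: the virtio-blk hunks above replace the old kref-peeking trick in virtblk_remove() with an explicit refcount plus a mutex: the refcount keeps struct virtio_blk alive while any opener holds it, and vdev_mutex makes the vblk->vdev NULL check atomic with hot-unplug. A condensed sketch of the pattern, with illustrative names:

struct foo_dev {
	struct mutex lock;	/* held by anything racing with remove() */
	refcount_t refs;	/* one ref from probe, one per open */
	void *hw;		/* NULL once the device is gone */
};

static int foo_open(struct foo_dev *d)
{
	int ret = 0;

	mutex_lock(&d->lock);
	if (d->hw)
		refcount_inc(&d->refs);	/* pin the object for this opener */
	else
		ret = -ENXIO;		/* opened after hot-unplug */
	mutex_unlock(&d->lock);
	return ret;
}

static void foo_put(struct foo_dev *d)
{
	if (refcount_dec_and_test(&d->refs))
		kfree(d);		/* last user frees, even after remove() */
}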


@@ -1731,7 +1731,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
  out:
 	if (rv) {
-		addr_info->client = NULL;
+		if (addr_info)
+			addr_info->client = NULL;
 		dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
 		kfree(ssif_info);
 	}


@@ -163,8 +163,6 @@ PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
 PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" };
 PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };
 
-PNAME(mux_aclk_gpu_pre_p) = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
-
 PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
 PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
 PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@@ -475,16 +473,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
 			RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
 			RK2928_CLKGATE_CON(2), 8, GFLAGS),
 
-	GATE(0, "cpll_gpu", "cpll", 0,
-			RK2928_CLKGATE_CON(3), 13, GFLAGS),
-	GATE(0, "gpll_gpu", "gpll", 0,
-			RK2928_CLKGATE_CON(3), 13, GFLAGS),
-	GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
-			RK2928_CLKGATE_CON(3), 13, GFLAGS),
-	GATE(0, "usb480m_gpu", "usb480m", 0,
+	COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
+			RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
 			RK2928_CLKGATE_CON(3), 13, GFLAGS),
-	COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
-			RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),
 
 	COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
 			RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
@@ -589,8 +580,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
 	GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
 
 	/* PD_GPU */
-	GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
-	GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
+	GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+	GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
 
 	/* PD_BUS */
 	GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),


@@ -935,7 +935,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
 	update_turbo_state();
 	if (global.turbo_disabled) {
-		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
 		mutex_unlock(&intel_pstate_limits_lock);
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EPERM;


@@ -362,6 +362,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
 		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
 				size);
 	tdmac->desc_arr = NULL;
+	if (tdmac->status == DMA_ERROR)
+		tdmac->status = DMA_COMPLETE;
 
 	return;
 }


@@ -873,6 +873,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
 	}
 
 	pci_set_master(pdev);
+	pd->dma.dev = &pdev->dev;
 
 	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
 	if (err) {
@@ -888,7 +889,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
 		goto err_free_irq;
 	}
 
-	pd->dma.dev = &pdev->dev;
 	INIT_LIST_HEAD(&pd->dma.channels);


@@ -210,7 +210,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
 		break;
 	default:
 		DRM_ERROR("unsupported image bit depth\n");
-		return -EINVAL; /* TODO: cleanup */
+		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+		return -EINVAL;
 	}
 	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
 	image->u.bitmap.x = width;


@@ -680,16 +680,21 @@ static int usbhid_open(struct hid_device *hid)
 	struct usbhid_device *usbhid = hid->driver_data;
 	int res;
 
+	mutex_lock(&usbhid->mutex);
+
 	set_bit(HID_OPENED, &usbhid->iofl);
 
-	if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-		return 0;
+	if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+		res = 0;
+		goto Done;
+	}
 
 	res = usb_autopm_get_interface(usbhid->intf);
 	/* the device must be awake to reliably request remote wakeup */
 	if (res < 0) {
 		clear_bit(HID_OPENED, &usbhid->iofl);
-		return -EIO;
+		res = -EIO;
+		goto Done;
 	}
 
 	usbhid->intf->needs_remote_wakeup = 1;
@@ -723,6 +728,9 @@ static int usbhid_open(struct hid_device *hid)
 		msleep(50);
 
 	clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+
+ Done:
+	mutex_unlock(&usbhid->mutex);
 	return res;
 }
@@ -730,6 +738,8 @@ static void usbhid_close(struct hid_device *hid)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
 
+	mutex_lock(&usbhid->mutex);
+
 	/*
	 * Make sure we don't restart data acquisition due to
	 * a resumption we no longer care about by avoiding racing
@@ -741,12 +751,13 @@ static void usbhid_close(struct hid_device *hid)
 	clear_bit(HID_IN_POLLING, &usbhid->iofl);
 	spin_unlock_irq(&usbhid->lock);
 
-	if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-		return;
+	if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+		hid_cancel_delayed_stuff(usbhid);
+		usb_kill_urb(usbhid->urbin);
+		usbhid->intf->needs_remote_wakeup = 0;
+	}
 
-	hid_cancel_delayed_stuff(usbhid);
-	usb_kill_urb(usbhid->urbin);
-	usbhid->intf->needs_remote_wakeup = 0;
+	mutex_unlock(&usbhid->mutex);
 }
 
 /*
@@ -1056,6 +1067,8 @@ static int usbhid_start(struct hid_device *hid)
 	unsigned int n, insize = 0;
 	int ret;
 
+	mutex_lock(&usbhid->mutex);
+
 	clear_bit(HID_DISCONNECTED, &usbhid->iofl);
 
 	usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@@ -1170,6 +1183,8 @@ static int usbhid_start(struct hid_device *hid)
 		usbhid_set_leds(hid);
 		device_set_wakeup_enable(&dev->dev, 1);
 	}
+
+	mutex_unlock(&usbhid->mutex);
 	return 0;
 
 fail:
@@ -1180,6 +1195,7 @@ fail:
 	usbhid->urbout = NULL;
 	usbhid->urbctrl = NULL;
 	hid_free_buffers(dev, hid);
+	mutex_unlock(&usbhid->mutex);
 	return ret;
 }
@@ -1195,6 +1211,8 @@ static void usbhid_stop(struct hid_device *hid)
 		usbhid->intf->needs_remote_wakeup = 0;
 	}
 
+	mutex_lock(&usbhid->mutex);
+
 	clear_bit(HID_STARTED, &usbhid->iofl);
 	spin_lock_irq(&usbhid->lock);	/* Sync with error and led handlers */
 	set_bit(HID_DISCONNECTED, &usbhid->iofl);
@@ -1215,6 +1233,8 @@ static void usbhid_stop(struct hid_device *hid)
 	usbhid->urbout = NULL;
 
 	hid_free_buffers(hid_to_usb_dev(hid), hid);
+
+	mutex_unlock(&usbhid->mutex);
 }
 
 static int usbhid_power(struct hid_device *hid, int lvl)
@@ -1375,6 +1395,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
 	INIT_WORK(&usbhid->reset_work, hid_reset);
 	setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
 	spin_lock_init(&usbhid->lock);
+	mutex_init(&usbhid->mutex);
 
 	ret = hid_add_device(hid);
 	if (ret) {


@@ -93,6 +93,7 @@ struct usbhid_device {
 	dma_addr_t outbuf_dma;		/* Output buffer dma */
 	unsigned long last_out;		/* record of last output for timeouts */
 
+	struct mutex mutex;		/* start/stop/open/close */
 	spinlock_t lock;		/* fifo spinlock */
 	unsigned long iofl;		/* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
 	struct timer_list io_retry;	/* Retry timer */


@@ -132,9 +132,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
 			data[0] = field->report->id;
 			ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
 					       data, n, WAC_CMD_RETRIES);
-			if (ret == n) {
+			if (ret == n && features->type == HID_GENERIC) {
 				ret = hid_report_raw_event(hdev,
 					HID_FEATURE_REPORT, data, n, 0);
+			} else if (ret == 2 && features->type != HID_GENERIC) {
+				features->touch_max = data[1];
 			} else {
 				features->touch_max = 16;
 				hid_warn(hdev, "wacom_feature_mapping: "


@@ -250,9 +250,9 @@ static ssize_t da9052_read_tsi(struct device *dev,
 	int channel = to_sensor_dev_attr(devattr)->index;
 	int ret;
 
-	mutex_lock(&hwmon->hwmon_lock);
+	mutex_lock(&hwmon->da9052->auxadc_lock);
 	ret = __da9052_read_tsi(dev, channel);
-	mutex_unlock(&hwmon->hwmon_lock);
+	mutex_unlock(&hwmon->da9052->auxadc_lock);
 	if (ret < 0)
 		return ret;


@@ -450,16 +450,15 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	struct rt6_info *rt;
-	int ret;
 
 	memset(&fl6, 0, sizeof fl6);
 	fl6.daddr = dst_in->sin6_addr;
 	fl6.saddr = src_in->sin6_addr;
 	fl6.flowi6_oif = addr->bound_dev_if;
 
-	ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
-	if (ret < 0)
-		return ret;
+	dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
+	if (IS_ERR(dst))
+		return PTR_ERR(dst);
 
 	rt = (struct rt6_info *)dst;
 	if (ipv6_addr_any(&src_in->sin6_addr)) {
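Note: this hunk and the rxe_net.c and mlx5 en_tc.c hunks below all apply the same API conversion: ipv6_stub->ipv6_dst_lookup() returned an int and filled in a dst pointer, while ipv6_dst_lookup_flow() returns the dst itself or an ERR_PTR(). A minimal sketch of the call-site pattern (illustrative wrapper, not code from this tree):

static int resolve_route6(struct net *net, struct flowi6 *fl6,
			  struct dst_entry **pdst)
{
	struct dst_entry *dst;

	/* was: ret = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, fl6); */
	dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);	/* the error now travels in the pointer */

	*pdst = dst;
	return 0;
}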


@@ -483,7 +483,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
 	int arp_index;
 
 	arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
-	if (arp_index == -1)
+	if (arp_index < 0)
 		return;
 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
 	if (!cqp_request)


@@ -2917,6 +2917,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	int send_size;
 	int header_size;
 	int spc;
+	int err;
 	int i;
 
 	if (wr->wr.opcode != IB_WR_SEND)
@@ -2951,7 +2952,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 
 	sqp->ud_header.lrh.virtual_lane = 0;
 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+	err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+	if (err)
+		return err;
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
 		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3240,9 +3243,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
 	}
 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
 	if (!sqp->qp.ibqp.qp_num)
-		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+		err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+					 &pkey);
 	else
-		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+		err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+					 &pkey);
+	if (err)
+		return err;
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));


@@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
 	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
 	fl6.flowi6_proto = IPPROTO_UDP;
 
-	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
-						recv_sockets.sk6->sk, &ndst, &fl6))) {
+	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
+					       recv_sockets.sk6->sk, &fl6,
+					       NULL);
+	if (unlikely(IS_ERR(ndst))) {
 		pr_err_ratelimited("no route to %pI6\n", daddr);
-		goto put;
+		return NULL;
 	}
 
 	if (unlikely(ndst->error)) {


@@ -357,6 +357,7 @@ static void __exit dsa_loop_exit(void)
 }
 module_exit(dsa_loop_exit);
 
+MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Fainelli");
 MODULE_DESCRIPTION("DSA loopback driver");


@@ -6827,6 +6827,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 					   netdev_features_t features)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	netdev_features_t vlan_features;
 
 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
 		features &= ~NETIF_F_NTUPLE;
@@ -6834,12 +6835,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 	/* Both CTAG and STAG VLAN accelaration on the RX side have to be
	 * turned on or off together.
	 */
-	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
-	    (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+	vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_STAG_RX);
+	if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+			      NETIF_F_HW_VLAN_STAG_RX)) {
 		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
 			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
 				      NETIF_F_HW_VLAN_STAG_RX);
-		else
+		else if (vlan_features)
 			features |= NETIF_F_HW_VLAN_CTAG_RX |
 				    NETIF_F_HW_VLAN_STAG_RX;
 	}
@@ -8420,8 +8423,11 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 		}
 	}
 
-	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
-		dev_close(netdev);
+	if (result != PCI_ERS_RESULT_RECOVERED) {
+		if (netif_running(netdev))
+			dev_close(netdev);
+		pci_disable_device(pdev);
+	}
 
 	rtnl_unlock();
 
@@ -8432,7 +8438,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 			 err); /* non-fatal, continue */
 	}
 
-	return PCI_ERS_RESULT_RECOVERED;
+	return result;
 }
 
 /**
/** /**


@@ -774,7 +774,6 @@ struct bnxt_vf_info {
 #define BNXT_VF_SPOOFCHK	0x2
 #define BNXT_VF_LINK_FORCED	0x4
 #define BNXT_VF_LINK_UP		0x8
-	u32	func_flags; /* func cfg flags */
 	u32	min_tx_rate;
 	u32	max_tx_rate;
 	void	*hwrm_cmd_req_addr;


@@ -99,11 +99,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	if (old_setting == setting)
 		return 0;
 
-	func_flags = vf->func_flags;
 	if (setting)
-		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
 	else
-		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
 	/*TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
@@ -112,7 +111,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	req.flags = cpu_to_le32(func_flags);
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
-		vf->func_flags = func_flags;
 		if (setting)
 			vf->flags |= BNXT_VF_SPOOFCHK;
 		else
@@ -176,7 +174,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 	memcpy(vf->mac_addr, mac, ETH_ALEN);
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(vf->func_flags);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -214,7 +211,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(vf->func_flags);
 	req.dflt_vlan = cpu_to_le16(vlan_tag);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -253,7 +249,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 		return 0;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(vf->func_flags);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 	req.max_bw = cpu_to_le32(max_tx_rate);
 	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);


@@ -54,6 +54,8 @@
 
 #define MGMT_MSG_TIMEOUT		5000
 
+#define SET_FUNC_PORT_MGMT_TIMEOUT	25000
+
 #define mgmt_to_pfhwdev(pf_mgmt)	\
 		container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -247,12 +249,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
 			    u8 *buf_in, u16 in_size,
 			    u8 *buf_out, u16 *out_size,
 			    enum mgmt_direction_type direction,
-			    u16 resp_msg_id)
+			    u16 resp_msg_id, u32 timeout)
 {
 	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
 	struct pci_dev *pdev = hwif->pdev;
 	struct hinic_recv_msg *recv_msg;
 	struct completion *recv_done;
+	unsigned long timeo;
 	u16 msg_id;
 	int err;
 
@@ -276,8 +279,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
 		goto unlock_sync_msg;
 	}
 
-	if (!wait_for_completion_timeout(recv_done,
-					 msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
+	timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+	if (!wait_for_completion_timeout(recv_done, timeo)) {
 		dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
 		err = -ETIMEDOUT;
 		goto unlock_sync_msg;
@@ -351,6 +355,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
 {
 	struct hinic_hwif *hwif = pf_to_mgmt->hwif;
 	struct pci_dev *pdev = hwif->pdev;
+	u32 timeout = 0;
 
 	if (sync != HINIC_MGMT_MSG_SYNC) {
 		dev_err(&pdev->dev, "Invalid MGMT msg type\n");
@@ -362,9 +367,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
 		return -EINVAL;
 	}
 
+	if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+		timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+
 	return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
 				buf_out, out_size, MGMT_DIRECT_SEND,
-				MSG_NOT_RESP);
+				MSG_NOT_RESP, timeout);
 }
 
 /**


@@ -473,7 +473,6 @@ static int hinic_close(struct net_device *netdev)
 {
 	struct hinic_dev *nic_dev = netdev_priv(netdev);
 	unsigned int flags;
-	int err;
 
 	down(&nic_dev->mgmt_lock);
 
@@ -487,20 +486,9 @@ static int hinic_close(struct net_device *netdev)
 
 	up(&nic_dev->mgmt_lock);
 
-	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
-	if (err) {
-		netif_err(nic_dev, drv, netdev,
-			  "Failed to set func port state\n");
-		nic_dev->flags |= (flags & HINIC_INTF_UP);
-		return err;
-	}
+	hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
 
-	err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
-	if (err) {
-		netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
-		nic_dev->flags |= (flags & HINIC_INTF_UP);
-		return err;
-	}
+	hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
 
 	free_rxqs(nic_dev);
 	free_txqs(nic_dev);


@@ -2503,6 +2503,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
 
 		if (!err || err == -ENOSPC) {
 			priv->def_counter[port] = idx;
+			err = 0;
 		} else if (err == -ENOENT) {
 			err = 0;
 			continue;
@@ -2553,7 +2554,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 		if (!err)
 			*idx = get_param_l(&out_param);
-
+		if (WARN_ON(err == -ENOSPC))
+			err = -EINVAL;
 		return err;
 	}
 	return __mlx4_counter_alloc(dev, idx);


@@ -831,7 +831,6 @@ static void cmd_work_handler(struct work_struct *work)
 	}
 
 	cmd->ent_arr[ent->idx] = ent;
-	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 	lay = get_inst(cmd, ent->idx);
 	ent->lay = lay;
 	memset(lay, 0, sizeof(*lay));
@@ -853,6 +852,7 @@ static void cmd_work_handler(struct work_struct *work)
 
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 
 	/* Skip sending command to fw if internal error */
 	if (pci_channel_offline(dev->pdev) ||
@@ -865,6 +865,10 @@ static void cmd_work_handler(struct work_struct *work)
 		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
 
 		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+		/* no doorbell, no need to keep the entry */
+		free_ent(cmd, ent->idx);
+		if (ent->callback)
+			free_cmd(ent);
 		return;
 	}


@@ -1550,12 +1550,11 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	int ret;
 
-	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
-					 fl6);
-	if (ret < 0)
-		return ret;
+	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
+					      NULL);
+	if (IS_ERR(dst))
+		return PTR_ERR(dst);
 
 	*out_ttl = ip6_dst_hoplimit(dst);
 
@@ -1754,7 +1753,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
 	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	struct net_device *out_dev;
+	struct net_device *out_dev = NULL;
 	struct neighbour *n = NULL;
 	struct flowi6 fl6 = {};
 	char *encap_header;


@@ -561,7 +561,7 @@ static int moxart_remove(struct platform_device *pdev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 
 	unregister_netdev(ndev);
-	free_irq(ndev->irq, ndev);
+	devm_free_irq(&pdev->dev, ndev->irq, ndev);
 	moxart_mac_free_memory(ndev);
 	free_netdev(ndev);


@@ -247,13 +247,15 @@ static int jazz_sonic_probe(struct platform_device *pdev)
 		goto out;
 	err = register_netdev(dev);
 	if (err)
-		goto out1;
+		goto undo_probe1;
 
 	printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
 
 	return 0;
 
-out1:
+undo_probe1:
+	dma_free_coherent(lp->device,
+			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+			  lp->descriptors, lp->descriptors_laddr);
 	release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
 out:
 	free_netdev(dev);


@@ -96,7 +96,7 @@ struct stmmac_priv {
 	struct net_device *dev;
 	struct device *device;
 	struct mac_device_info *hw;
-	spinlock_t lock;
+	struct mutex lock;
 
 	/* RX Queue */
 	struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];


@@ -392,13 +392,13 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
 				ADVERTISED_10baseT_Half |
 				ADVERTISED_10baseT_Full);
 
-		spin_lock(&priv->lock);
+		mutex_lock(&priv->lock);
 
 		if (priv->hw->mac->pcs_ctrl_ane)
 			priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
 						    priv->hw->ps, 0);
-		spin_unlock(&priv->lock);
+		mutex_unlock(&priv->lock);
 
 		return 0;
 	}
@@ -615,12 +615,12 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	spin_lock_irq(&priv->lock);
+	mutex_lock(&priv->lock);
 	if (device_can_wakeup(priv->device)) {
 		wol->supported = WAKE_MAGIC | WAKE_UCAST;
 		wol->wolopts = priv->wolopts;
 	}
-	spin_unlock_irq(&priv->lock);
+	mutex_unlock(&priv->lock);
 }
 
 static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -649,9 +649,9 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 		disable_irq_wake(priv->wol_irq);
 	}
 
-	spin_lock_irq(&priv->lock);
+	mutex_lock(&priv->lock);
 	priv->wolopts = wol->wolopts;
-	spin_unlock_irq(&priv->lock);
+	mutex_unlock(&priv->lock);
 
 	return 0;
 }


@@ -365,7 +365,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 {
 	struct net_device *ndev = priv->dev;
 	int interface = priv->plat->interface;
-	unsigned long flags;
 	bool ret = false;
 
 	if ((interface != PHY_INTERFACE_MODE_MII) &&
@@ -392,7 +391,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 			 * changed).
 			 * In that case the driver disable own timers.
 			 */
-			spin_lock_irqsave(&priv->lock, flags);
+			mutex_lock(&priv->lock);
 			if (priv->eee_active) {
 				netdev_dbg(priv->dev, "disable EEE\n");
 				del_timer_sync(&priv->eee_ctrl_timer);
@@ -400,11 +399,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 						     tx_lpi_timer);
 			}
 			priv->eee_active = 0;
-			spin_unlock_irqrestore(&priv->lock, flags);
+			mutex_unlock(&priv->lock);
 			goto out;
 		}
 		/* Activate the EEE and start timers */
-		spin_lock_irqsave(&priv->lock, flags);
+		mutex_lock(&priv->lock);
 		if (!priv->eee_active) {
 			priv->eee_active = 1;
 			setup_timer(&priv->eee_ctrl_timer,
@@ -421,7 +420,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
 
 		ret = true;
-		spin_unlock_irqrestore(&priv->lock, flags);
+		mutex_unlock(&priv->lock);
 
 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
 	}
@@ -799,13 +798,12 @@ static void stmmac_adjust_link(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = dev->phydev;
-	unsigned long flags;
 	bool new_state = false;
 
 	if (!phydev)
 		return;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	mutex_lock(&priv->lock);
 
 	if (phydev->link) {
 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -864,7 +862,7 @@ static void stmmac_adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	mutex_unlock(&priv->lock);
 
 	if (phydev->is_pseudo_fixed_link)
 		/* Stop PHY layer to call the hook to adjust the link in case
@@ -4284,7 +4282,7 @@ int stmmac_dvr_probe(struct device *device,
 			       (8 * priv->plat->rx_queues_to_use));
 	}
 
-	spin_lock_init(&priv->lock);
+	mutex_init(&priv->lock);
 
 	/* If a specific clk_csr value is passed from the platform
 	 * this means that the CSR Clock Range selection cannot be
@@ -4375,6 +4373,7 @@ int stmmac_dvr_remove(struct device *dev)
 	    priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
+	mutex_destroy(&priv->lock);
 	free_netdev(ndev);
 
 	return 0;
@@ -4392,7 +4391,6 @@ int stmmac_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	unsigned long flags;
 
 	if (!ndev || !netif_running(ndev))
 		return 0;
@@ -4400,7 +4398,7 @@ int stmmac_suspend(struct device *dev)
 	if (ndev->phydev)
 		phy_stop(ndev->phydev);
 
-	spin_lock_irqsave(&priv->lock, flags);
+	mutex_lock(&priv->lock);
 
 	netif_device_detach(ndev);
 	stmmac_stop_all_queues(priv);
@@ -4423,7 +4421,7 @@ int stmmac_suspend(struct device *dev)
 		clk_disable_unprepare(priv->plat->pclk);
 		clk_disable_unprepare(priv->plat->stmmac_clk);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	mutex_unlock(&priv->lock);
 
 	priv->oldlink = false;
 	priv->speed = SPEED_UNKNOWN;
@@ -4467,7 +4465,6 @@ int stmmac_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	unsigned long flags;
 
 	if (!netif_running(ndev))
 		return 0;
@@ -4479,9 +4476,9 @@ int stmmac_resume(struct device *dev)
 	 * from another devices (e.g. serial console).
 	 */
 	if (device_may_wakeup(priv->device)) {
-		spin_lock_irqsave(&priv->lock, flags);
+		mutex_lock(&priv->lock);
 		priv->hw->mac->pmt(priv->hw, 0);
-		spin_unlock_irqrestore(&priv->lock, flags);
+		mutex_unlock(&priv->lock);
 		priv->irq_wake = 0;
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
@@ -4497,7 +4494,7 @@ int stmmac_resume(struct device *dev)
 
 	netif_device_attach(ndev);
 
-	spin_lock_irqsave(&priv->lock, flags);
+	mutex_lock(&priv->lock);
 
 	stmmac_reset_queues_param(priv);
 
@@ -4516,7 +4513,7 @@ int stmmac_resume(struct device *dev)
 
 	stmmac_start_all_queues(priv);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	mutex_unlock(&priv->lock);
 
 	if (ndev->phydev)
 		phy_start(ndev->phydev);


@@ -796,7 +796,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
 		if (dst)
 			return dst;
 	}
-	if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
+	dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
+					      NULL);
+	if (IS_ERR(dst)) {
 		netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
 		return ERR_PTR(-ENETUNREACH);
 	}
@@ -1369,21 +1371,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
 	}
 
 	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
+#if IS_ENABLED(CONFIG_IPV6)
 		if (changelink) {
 			attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
 			goto change_notsup;
 		}
 		if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
 			info->key.tun_flags &= ~TUNNEL_CSUM;
+#else
+		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
+				    "IPv6 support not enabled in the kernel");
+		return -EPFNOSUPPORT;
+#endif
 	}
 
 	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
+#if IS_ENABLED(CONFIG_IPV6)
 		if (changelink) {
 			attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
 			goto change_notsup;
 		}
 		if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
 			*use_udp6_rx_checksums = false;
+#else
+		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
+				    "IPv6 support not enabled in the kernel");
+		return -EPFNOSUPPORT;
+#endif
 	}
 
 	return 0;
@@ -1559,11 +1573,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
 		goto nla_put_failure;
 
 	if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
 		goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
 	if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
 		       !geneve->use_udp6_rx_checksums))
 		goto nla_put_failure;
+#endif
 
 	return 0;


@@ -1309,7 +1309,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
 	struct crypto_aead *tfm;
 	int ret;
 
-	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
 
 	if (IS_ERR(tfm))
 		return tfm;


@@ -1110,7 +1110,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
 		goto out;
 	}
 	dp83640_clock_init(clock, bus);
-	list_add_tail(&phyter_clocks, &clock->list);
+	list_add_tail(&clock->list, &phyter_clocks);
 out:
 	mutex_unlock(&phyter_clocks_lock);
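
The dp83640 hunk fixes a swapped-argument call: list_add_tail() takes the new node first and the list head second. A minimal user-space sketch of the same circular-list helper shows why the swap is destructive (all names here are illustrative, not the kernel's headers):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

/* Node first, list head second -- the argument order the fix restores. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct clock { int id; struct list_head list; };

int main(void)
{
	struct list_head clocks;
	struct clock a = { .id = 1 }, b = { .id = 2 };
	struct list_head *p;

	list_init(&clocks);
	list_add_tail(&a.list, &clocks);	/* appends a */
	list_add_tail(&b.list, &clocks);	/* appends b */
	/* Swapping the arguments, list_add_tail(&clocks, &b.list), would
	 * splice the global head into b's own list instead, so earlier
	 * nodes silently vanish from every traversal of 'clocks'. */
	for (p = clocks.next; p != &clocks; p = p->next) {
		struct clock *c = (struct clock *)((char *)p -
					offsetof(struct clock, list));
		printf("clock %d\n", c->id);
	}
	return 0;
}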


@@ -674,8 +674,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
-		memcpy(data + i * ETH_GSTRING_LEN,
+		strlcpy(data + i * ETH_GSTRING_LEN,
 			kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
 	}
 }
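
The kszphy change matters because memcpy() always copies ETH_GSTRING_LEN bytes and so reads past the end of any shorter string literal, while strlcpy() stops at the terminator. A small sketch of the pattern; older glibc lacks strlcpy(), so the demo carries a local copy (local_strlcpy is a stand-in, not a library call):

#include <stdio.h>
#include <string.h>

/* Mirrors the BSD/kernel strlcpy(): bounded copy, always terminated. */
static size_t local_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char slot[32];

	/* memcpy(slot, "rx_err", 32) would read 32 bytes from a 7-byte
	 * literal; the bounded copy reads only what the source holds. */
	local_strlcpy(slot, "rx_err", sizeof(slot));
	printf("%s\n", slot);
	return 0;
}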


@@ -1257,9 +1257,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 		/* Restart autonegotiation so the new modes get sent to the
 		 * link partner.
 		 */
-		ret = phy_restart_aneg(phydev);
-		if (ret < 0)
-			return ret;
+		if (phydev->autoneg == AUTONEG_ENABLE) {
+			ret = phy_restart_aneg(phydev);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	return 0;


@@ -1283,6 +1283,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81cc, 8)},	/* Dell Wireless 5816e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 0)},	/* Dell Wireless 5821e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 1)},	/* Dell Wireless 5821e preproduction config */
 	{QMI_FIXED_INTF(0x413c, 0x81e0, 0)},	/* Dell Wireless 5821e with eSIM support*/


@@ -1962,7 +1962,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
 	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
 	struct dst_entry *ndst;
 	struct flowi6 fl6;
-	int err;
 
 	if (!sock6)
 		return ERR_PTR(-EIO);
@@ -1985,10 +1984,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
 	fl6.fl6_dport = dport;
 	fl6.fl6_sport = sport;
 
-	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
-					 sock6->sock->sk,
-					 &ndst, &fl6);
-	if (unlikely(err < 0)) {
+	ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
+					       &fl6, NULL);
+	if (unlikely(IS_ERR(ndst))) {
 		netdev_dbg(dev, "no route to %pI6\n", daddr);
 		return ERR_PTR(-ENETUNREACH);
 	}


@@ -1503,6 +1503,7 @@ static const struct gpio_chip byt_gpio_chip = {
 	.direction_output	= byt_gpio_direction_output,
 	.get			= byt_gpio_get,
 	.set			= byt_gpio_set,
+	.set_config		= gpiochip_generic_config,
 	.dbg_show		= byt_gpio_dbg_show,
 };


@@ -1514,11 +1514,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
 	struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long pending;
+	unsigned long flags;
 	u32 intr_line;
 
 	chained_irq_enter(chip, desc);
 
+	raw_spin_lock_irqsave(&chv_lock, flags);
 	pending = readl(pctrl->regs + CHV_INTSTAT);
+	raw_spin_unlock_irqrestore(&chv_lock, flags);
+
 	for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
 		unsigned irq, offset;


@@ -695,8 +695,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 	hp->flags = input_size;	/* structure abuse ... */
 	hp->pack_id = old_hdr.pack_id;
 	hp->usr_ptr = NULL;
-	if (__copy_from_user(cmnd, buf, cmd_size))
+	if (__copy_from_user(cmnd, buf, cmd_size)) {
+		sg_remove_request(sfp, srp);
 		return -EFAULT;
+	}
 	/*
 	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
 	 * but is is possible that the app intended SG_DXFER_TO_DEV, because there


@@ -37,6 +37,7 @@
 
 #define USB_VENDOR_GENESYS_LOGIC		0x05e3
 #define USB_VENDOR_SMSC				0x0424
+#define USB_PRODUCT_USB5534B			0x5534
 
 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
 #define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
@@ -5317,8 +5318,11 @@ out_hdev_lock:
 }
 
 static const struct usb_device_id hub_id_table[] = {
-    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                   | USB_DEVICE_ID_MATCH_PRODUCT
+                   | USB_DEVICE_ID_MATCH_INT_CLASS,
       .idVendor = USB_VENDOR_SMSC,
+      .idProduct = USB_PRODUCT_USB5534B,
      .bInterfaceClass = USB_CLASS_HUB,
      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR


@@ -259,6 +259,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
 	char *name;
 	int ret;
 
+	if (strlen(page) < len)
+		return -EOVERFLOW;
+
 	name = kstrdup(page, GFP_KERNEL);
 	if (!name)
 		return -ENOMEM;


@@ -303,8 +303,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
 		struct usb_descriptor_header *usb_desc;
 
 		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
-		if (!usb_desc)
+		if (!usb_desc) {
+			status = -ENOMEM;
 			goto fail;
+		}
 		usb_otg_descriptor_init(cdev->gadget, usb_desc);
 		otg_desc[0] = usb_desc;
 		otg_desc[1] = NULL;


@@ -183,8 +183,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
 		struct usb_descriptor_header *usb_desc;
 
 		usb_desc = usb_otg_descriptor_alloc(gadget);
-		if (!usb_desc)
+		if (!usb_desc) {
+			status = -ENOMEM;
 			goto fail1;
+		}
 		usb_otg_descriptor_init(gadget, usb_desc);
 		otg_desc[0] = usb_desc;
 		otg_desc[1] = NULL;


@@ -162,8 +162,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
 		struct usb_descriptor_header *usb_desc;
 
 		usb_desc = usb_otg_descriptor_alloc(gadget);
-		if (!usb_desc)
+		if (!usb_desc) {
+			status = -ENOMEM;
 			goto fail;
+		}
 		usb_otg_descriptor_init(gadget, usb_desc);
 		otg_desc[0] = usb_desc;
 		otg_desc[1] = NULL;


@@ -2666,6 +2666,8 @@ net2272_plat_probe(struct platform_device *pdev)
  err_req:
 	release_mem_region(base, len);
  err:
+	kfree(dev);
+
 	return ret;
 }


@@ -334,6 +334,7 @@ static int xhci_plat_remove(struct platform_device *dev)
 	struct clk *clk = xhci->clk;
 	struct usb_hcd *shared_hcd = xhci->shared_hcd;
 
+	pm_runtime_get_sync(&dev->dev);
 	xhci->xhc_state |= XHCI_STATE_REMOVING;
 
 	usb_remove_hcd(shared_hcd);
@@ -347,8 +348,9 @@ static int xhci_plat_remove(struct platform_device *dev)
 	clk_disable_unprepare(clk);
 	usb_put_hcd(hcd);
 
-	pm_runtime_set_suspended(&dev->dev);
 	pm_runtime_disable(&dev->dev);
+	pm_runtime_put_noidle(&dev->dev);
+	pm_runtime_set_suspended(&dev->dev);
 
 	return 0;
 }


@@ -3403,8 +3403,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			/* New sg entry */
 			--num_sgs;
 			sent_len -= block_len;
-			if (num_sgs != 0) {
-				sg = sg_next(sg);
+			sg = sg_next(sg);
+			if (num_sgs != 0 && sg) {
 				block_len = sg_dma_len(sg);
 				addr = (u64) sg_dma_address(sg);
 				addr += sent_len;


@@ -1161,8 +1161,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
 		   send it directly to the tty port */
 		if (garmin_data_p->flags & FLAGS_QUEUING) {
 			pkt_add(garmin_data_p, data, data_length);
-		} else if (bulk_data ||
-			   getLayerId(data) == GARMIN_LAYERID_APPL) {
+		} else if (bulk_data || (data_length >= sizeof(u32) &&
+				getLayerId(data) == GARMIN_LAYERID_APPL)) {
 
 			spin_lock_irqsave(&garmin_data_p->lock, flags);
 			garmin_data_p->flags |= APP_RESP_SEEN;
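
The garmin_gps change is the usual length-before-dereference rule: getLayerId() reads a u32 out of the packet, so the packet must be at least four bytes long before that read happens. A user-space sketch of the same check (get_layer_id() and the layer value 20 are invented for the demo, and the value comparison assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for getLayerId(): reads the first 4 bytes. */
static uint32_t get_layer_id(const unsigned char *buf)
{
	uint32_t id;

	memcpy(&id, buf, sizeof(id));	/* only safe when len >= 4 */
	return id;
}

static int is_application_layer(const unsigned char *buf, size_t len)
{
	/* Check the length *before* reading, as the fix does. */
	return len >= sizeof(uint32_t) && get_layer_id(buf) == 20;
}

int main(void)
{
	unsigned char short_pkt[2] = { 0x14, 0x00 };
	unsigned char full_pkt[4] = { 0x14, 0x00, 0x00, 0x00 };

	printf("%d %d\n", is_application_layer(short_pkt, sizeof(short_pkt)),
	       is_application_layer(full_pkt, sizeof(full_pkt)));
	return 0;
}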


@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 	{DEVICE_SWI(0x413c, 0x81b5)},	/* Dell Wireless 5811e QDL */
 	{DEVICE_SWI(0x413c, 0x81b6)},	/* Dell Wireless 5811e QDL */
+	{DEVICE_SWI(0x413c, 0x81cc)},	/* Dell Wireless 5816e */
 	{DEVICE_SWI(0x413c, 0x81cf)},	/* Dell Wireless 5819 */
 	{DEVICE_SWI(0x413c, 0x81d0)},	/* Dell Wireless 5819 */
 	{DEVICE_SWI(0x413c, 0x81d1)},	/* Dell Wireless 5818 */


@@ -41,6 +41,13 @@
  * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
  */
 
+/* Reported-by: Julian Groß <julian.g@posteo.de> */
+UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+		"LaCie",
+		"2Big Quadra USB3",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_OPCODES),
+
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
  * commands in UAS mode. Observed with the 1.28 firmware; are there others?


@@ -758,6 +758,14 @@ void do_coredump(const siginfo_t *siginfo)
 	if (displaced)
 		put_files_struct(displaced);
 	if (!dump_interrupted()) {
+		/*
+		 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
+		 * have this set to NULL.
+		 */
+		if (!cprm.file) {
+			pr_info("Core dump to |%s disabled\n", cn.corename);
+			goto close_fail;
+		}
 		file_start_write(cprm.file);
 		core_dumped = binfmt->core_dump(&cprm);
 		file_end_write(cprm.file);


@@ -1265,6 +1265,8 @@ int flush_old_exec(struct linux_binprm * bprm)
 	 */
 	set_mm_exe_file(bprm->mm, bprm->file);
 
+	would_dump(bprm, bprm->file);
+
 	/*
 	 * Release all of the old mmap stuff
 	 */
@@ -1798,8 +1800,6 @@ static int do_execveat_common(int fd, struct filename *filename,
 	if (retval < 0)
 		goto out;
 
-	would_dump(bprm, bprm->file);
-
 	retval = exec_binprm(bprm);
 	if (retval < 0)
 		goto out;


@@ -201,12 +201,17 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int index)
 	return handler;
 }
 
-static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
-				size_t len, const char *name)
+static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
+				void *last_base_addr, int index,
+				size_t len, const char *name)
 {
 	struct f2fs_xattr_entry *entry;
 
 	list_for_each_xattr(entry, base_addr) {
+		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
+			return NULL;
+
 		if (entry->e_name_index != index)
 			continue;
 		if (entry->e_name_len != len)
@@ -241,65 +246,89 @@ static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr,
 	return entry;
 }
 
+static int read_inline_xattr(struct inode *inode, struct page *ipage,
+							void *txattr_addr)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	unsigned int inline_size = inline_xattr_size(inode);
+	struct page *page = NULL;
+	void *inline_addr;
+
+	if (ipage) {
+		inline_addr = inline_xattr_addr(ipage);
+	} else {
+		page = get_node_page(sbi, inode->i_ino);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		inline_addr = inline_xattr_addr(page);
+	}
+	memcpy(txattr_addr, inline_addr, inline_size);
+	f2fs_put_page(page, 1);
+
+	return 0;
+}
+
+static int read_xattr_block(struct inode *inode, void *txattr_addr)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+	unsigned int inline_size = inline_xattr_size(inode);
+	struct page *xpage;
+	void *xattr_addr;
+
+	/* The inode already has an extended attribute block. */
+	xpage = get_node_page(sbi, xnid);
+	if (IS_ERR(xpage))
+		return PTR_ERR(xpage);
+
+	xattr_addr = page_address(xpage);
+	memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE);
+	f2fs_put_page(xpage, 1);
+
+	return 0;
+}
+
 static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 				unsigned int index, unsigned int len,
 				const char *name, struct f2fs_xattr_entry **xe,
-				void **base_addr)
+				void **base_addr, int *base_size)
 {
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	void *cur_addr, *txattr_addr, *last_addr = NULL;
+	void *cur_addr, *txattr_addr, *last_txattr_addr;
+	void *last_addr = NULL;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
-	unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
 	unsigned int inline_size = inline_xattr_size(inode);
 	int err = 0;
 
-	if (!size && !inline_size)
+	if (!xnid && !inline_size)
 		return -ENODATA;
 
-	txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
-							GFP_F2FS_ZERO);
+	*base_size = XATTR_SIZE(xnid, inode) + XATTR_PADDING_SIZE;
+	txattr_addr = kzalloc(*base_size, GFP_F2FS_ZERO);
 	if (!txattr_addr)
 		return -ENOMEM;
 
+	last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(xnid, inode);
+
 	/* read from inline xattr */
 	if (inline_size) {
-		struct page *page = NULL;
-		void *inline_addr;
-
-		if (ipage) {
-			inline_addr = inline_xattr_addr(ipage);
-		} else {
-			page = get_node_page(sbi, inode->i_ino);
-			if (IS_ERR(page)) {
-				err = PTR_ERR(page);
-				goto out;
-			}
-			inline_addr = inline_xattr_addr(page);
-		}
-		memcpy(txattr_addr, inline_addr, inline_size);
-		f2fs_put_page(page, 1);
+		err = read_inline_xattr(inode, ipage, txattr_addr);
+		if (err)
+			goto out;
 
 		*xe = __find_inline_xattr(txattr_addr, &last_addr,
 						index, len, name);
-		if (*xe)
+		if (*xe) {
+			*base_size = inline_size;
 			goto check;
+		}
 	}
 
 	/* read from xattr node block */
 	if (xnid) {
-		struct page *xpage;
-		void *xattr_addr;
-
-		/* The inode already has an extended attribute block. */
-		xpage = get_node_page(sbi, xnid);
-		if (IS_ERR(xpage)) {
-			err = PTR_ERR(xpage);
+		err = read_xattr_block(inode, txattr_addr);
+		if (err)
 			goto out;
-		}
-		xattr_addr = page_address(xpage);
-		memcpy(txattr_addr + inline_size, xattr_addr, size);
-		f2fs_put_page(xpage, 1);
 	}
 
 	if (last_addr)
@@ -307,7 +336,11 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 	else
 		cur_addr = txattr_addr;
 
-	*xe = __find_xattr(cur_addr, index, len, name);
+	*xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
+	if (!*xe) {
+		err = -EFAULT;
+		goto out;
+	}
 
 check:
 	if (IS_XATTR_LAST_ENTRY(*xe)) {
 		err = -ENODATA;
@@ -324,7 +357,6 @@ out:
 static int read_all_xattrs(struct inode *inode, struct page *ipage,
 							void **base_addr)
 {
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_xattr_header *header;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	unsigned int size = VALID_XATTR_BLOCK_SIZE;
@@ -339,38 +371,16 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
 
 	/* read from inline xattr */
 	if (inline_size) {
-		struct page *page = NULL;
-		void *inline_addr;
-
-		if (ipage) {
-			inline_addr = inline_xattr_addr(ipage);
-		} else {
-			page = get_node_page(sbi, inode->i_ino);
-			if (IS_ERR(page)) {
-				err = PTR_ERR(page);
-				goto fail;
-			}
-			inline_addr = inline_xattr_addr(page);
-		}
-		memcpy(txattr_addr, inline_addr, inline_size);
-		f2fs_put_page(page, 1);
+		err = read_inline_xattr(inode, ipage, txattr_addr);
+		if (err)
+			goto fail;
 	}
 
 	/* read from xattr node block */
 	if (xnid) {
-		struct page *xpage;
-		void *xattr_addr;
-
-		/* The inode already has an extended attribute block. */
-		xpage = get_node_page(sbi, xnid);
-		if (IS_ERR(xpage)) {
-			err = PTR_ERR(xpage);
+		err = read_xattr_block(inode, txattr_addr);
+		if (err)
 			goto fail;
-		}
-		xattr_addr = page_address(xpage);
-		memcpy(txattr_addr + inline_size, xattr_addr, size);
-		f2fs_put_page(xpage, 1);
 	}
 
 	header = XATTR_HDR(txattr_addr);
@@ -465,6 +475,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 	int error = 0;
 	unsigned int size, len;
 	void *base_addr = NULL;
+	int base_size;
 
 	if (name == NULL)
 		return -EINVAL;
@@ -475,7 +486,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 
 	down_read(&F2FS_I(inode)->i_xattr_sem);
 	error = lookup_all_xattrs(inode, ipage, index, len, name,
-				&entry, &base_addr);
+				&entry, &base_addr, &base_size);
 	up_read(&F2FS_I(inode)->i_xattr_sem);
 	if (error)
 		return error;
@@ -489,6 +500,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 
 	if (buffer) {
 		char *pval = entry->e_name + entry->e_name_len;
+
+		if (base_size - (pval - (char *)base_addr) < size) {
+			error = -ERANGE;
+			goto out;
+		}
 		memcpy(buffer, pval, size);
 	}
 	error = size;
@@ -500,8 +516,9 @@ out:
 ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
 	struct inode *inode = d_inode(dentry);
+	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	struct f2fs_xattr_entry *entry;
-	void *base_addr;
+	void *base_addr, *last_base_addr;
 	int error = 0;
 	size_t rest = buffer_size;
 
@@ -511,6 +528,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 	if (error)
 		return error;
 
+	last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
 	list_for_each_xattr(entry, base_addr) {
 		const struct xattr_handler *handler =
 			f2fs_xattr_handler(entry->e_name_index);
@@ -518,6 +537,16 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 		size_t prefix_len;
 		size_t size;
 
+		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+			f2fs_msg(dentry->d_sb, KERN_ERR,
+				 "inode (%lu) has corrupted xattr",
+				 inode->i_ino);
+			set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+			error = -EFSCORRUPTED;
+			goto cleanup;
+		}
+
 		if (!handler || (handler->list && !handler->list(dentry)))
 			continue;
 
@@ -557,7 +586,8 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 			struct page *ipage, int flags)
 {
 	struct f2fs_xattr_entry *here, *last;
-	void *base_addr;
+	void *base_addr, *last_base_addr;
+	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	int found, newsize;
 	size_t len;
 	__u32 new_hsize;
@@ -581,8 +611,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 	if (error)
 		return error;
 
+	last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
 	/* find entry with wanted name. */
-	here = __find_xattr(base_addr, index, len, name);
+	here = __find_xattr(base_addr, last_base_addr, index, len, name);
+	if (!here) {
+		error = -EFAULT;
+		goto exit;
+	}
 
 	found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
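
The f2fs hunks all enforce one invariant: when walking variable-length xattr entries read from disk, every step must be bounds-checked against the end of the buffer, or a crafted image can march the cursor out of bounds. A self-contained sketch of that walk (the entry layout is deliberately simplified and is not the f2fs on-disk format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Variable-length entry: a length byte followed by that many name bytes. */
struct entry {
	uint8_t name_len;
	char name[];
};

#define NEXT_ENTRY(e) \
	((const struct entry *)((const char *)(e) + 1 + (e)->name_len))

/* Walk entries without stepping past 'end' -- the role last_base_addr
 * plays in __find_xattr() above. */
static const struct entry *find(const void *base, const void *end,
				const char *name, size_t len)
{
	const struct entry *e = base;

	while ((const char *)e + sizeof(*e) <= (const char *)end &&
	       e->name_len) {
		if ((const char *)NEXT_ENTRY(e) > (const char *)end)
			return NULL;	/* corrupted: entry overruns buffer */
		if (e->name_len == len && !memcmp(e->name, name, len))
			return e;
		e = NEXT_ENTRY(e);
	}
	return NULL;
}

int main(void)
{
	/* "ab", "xyz", then a zero-length terminator entry. */
	unsigned char buf[] = { 2, 'a', 'b', 3, 'x', 'y', 'z', 0 };

	printf("%s\n", find(buf, buf + sizeof(buf), "xyz", 3) ?
	       "found" : "not found");
	return 0;
}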


@@ -74,6 +74,8 @@ struct f2fs_xattr_entry {
 			entry = XATTR_NEXT_ENTRY(entry))
 #define VALID_XATTR_BLOCK_SIZE	(PAGE_SIZE - sizeof(struct node_footer))
 #define XATTR_PADDING_SIZE	(sizeof(__u32))
+#define XATTR_SIZE(x,i)		(((x) ? VALID_XATTR_BLOCK_SIZE : 0) +	\
+						(inline_xattr_size(i)))
 #define MIN_OFFSET(i)		XATTR_ALIGN(inline_xattr_size(i) +	\
 						VALID_XATTR_BLOCK_SIZE)


@@ -568,7 +568,7 @@ struct request_queue {
 	unsigned int		sg_reserved_size;
 	int			node;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
-	struct blk_trace	*blk_trace;
+	struct blk_trace __rcu	*blk_trace;
 	struct mutex		blk_trace_mutex;
 #endif
 	/*


@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
  **/
 #define blk_add_cgroup_trace_msg(q, cg, fmt, ...)			\
 	do {								\
-		struct blk_trace *bt = (q)->blk_trace;			\
+		struct blk_trace *bt;					\
+									\
+		rcu_read_lock();					\
+		bt = rcu_dereference((q)->blk_trace);			\
 		if (unlikely(bt))					\
 			__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+		rcu_read_unlock();					\
 	} while (0)
 #define blk_add_trace_msg(q, fmt, ...)					\
 	blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
 
 static inline bool blk_trace_note_message_enabled(struct request_queue *q)
 {
-	struct blk_trace *bt = q->blk_trace;
-	if (likely(!bt))
-		return false;
-	return bt->act_mask & BLK_TC_NOTIFY;
+	struct blk_trace *bt;
+	bool ret;
+
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+	rcu_read_unlock();
+	return ret;
 }
 
 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
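
The read-side pattern being introduced here is: enter the RCU read section, rcu_dereference() the pointer once into a local, use only that local, then unlock. Kernel RCU has no direct user-space equivalent, so this sketch mimics only the publish/consume half of it with C11 acquire/release atomics and ignores reclamation entirely (all names are invented):

#include <stdatomic.h>
#include <stdio.h>

struct blk_trace_like { int act_mask; };

/* Shared pointer that readers may observe concurrently with updates. */
static _Atomic(struct blk_trace_like *) trace_ptr;

/* Publisher: fully initialize the object, then release-store the
 * pointer -- the ordering rcu_assign_pointer() provides. */
static void publish(struct blk_trace_like *bt)
{
	atomic_store_explicit(&trace_ptr, bt, memory_order_release);
}

/* Reader: acquire-load the pointer once and use that local copy,
 * instead of re-reading the shared field and racing with teardown. */
static int note_message_enabled(void)
{
	struct blk_trace_like *bt =
		atomic_load_explicit(&trace_ptr, memory_order_acquire);

	return bt && (bt->act_mask & 0x1);
}

int main(void)
{
	static struct blk_trace_like bt = { .act_mask = 0x1 };

	printf("%d\n", note_message_enabled());	/* 0: not yet published */
	publish(&bt);
	printf("%d\n", note_message_enabled());	/* 1 */
	return 0;
}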


@@ -382,4 +382,10 @@ unsigned long read_word_at_a_time(const void *addr)
 	(_________p1); \
 })
 
+/*
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
+ */
+#define prevent_tail_call_optimization()	mb()
+
 #endif /* __LINUX_COMPILER_H */


@@ -900,7 +900,7 @@ struct file_handle {
 	__u32 handle_bytes;
 	int handle_type;
 	/* file identifier */
-	unsigned char f_handle[0];
+	unsigned char f_handle[];
 };
 
 static inline struct file *get_file(struct file *f)
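
Replacing f_handle[0] with f_handle[] swaps a GNU zero-length-array idiom for the C99 flexible array member, which the compiler understands well enough to diagnose out-of-bounds accesses. Allocation sizing is unchanged, as a small sketch shows (struct handle here is a simplified stand-in, not the uapi struct):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct handle {
	unsigned int handle_bytes;
	unsigned char f_handle[];	/* no size: storage is appended */
};

int main(void)
{
	const unsigned char id[] = { 0xde, 0xad, 0xbe, 0xef };
	/* sizeof(*h) covers the fixed head; the trailing bytes follow. */
	struct handle *h = malloc(sizeof(*h) + sizeof(id));

	if (!h)
		return 1;
	h->handle_bytes = sizeof(id);
	memcpy(h->f_handle, id, sizeof(id));
	printf("%u bytes, first 0x%02x\n", h->handle_bytes, h->f_handle[0]);
	free(h);
	return 0;
}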


@@ -220,10 +220,8 @@ struct pnp_card {
 #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
 #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
 #define to_pnp_card(n) container_of(n, struct pnp_card, dev)
-#define pnp_for_each_card(card) \
-	for((card) = global_to_pnp_card(pnp_cards.next); \
-	(card) != global_to_pnp_card(&pnp_cards); \
-	(card) = global_to_pnp_card((card)->global_list.next))
+#define pnp_for_each_card(card)	\
+	list_for_each_entry(card, &pnp_cards, global_list)
 
 struct pnp_card_link {
 	struct pnp_card *card;
@@ -276,14 +274,9 @@ struct pnp_dev {
 #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
 #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
 #define	to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
-#define pnp_for_each_dev(dev) \
-	for((dev) = global_to_pnp_dev(pnp_global.next); \
-	(dev) != global_to_pnp_dev(&pnp_global); \
-	(dev) = global_to_pnp_dev((dev)->global_list.next))
-#define card_for_each_dev(card,dev) \
-	for((dev) = card_to_pnp_dev((card)->devices.next); \
-	(dev) != card_to_pnp_dev(&(card)->devices); \
-	(dev) = card_to_pnp_dev((dev)->card_list.next))
+#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list)
+#define card_for_each_dev(card, dev)	\
+	list_for_each_entry(dev, &(card)->devices, card_list)
 #define pnp_dev_name(dev) (dev)->name
 
 static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
@@ -437,14 +430,10 @@ struct pnp_protocol {
 };
 
 #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
-#define protocol_for_each_card(protocol,card) \
-	for((card) = protocol_to_pnp_card((protocol)->cards.next); \
-	(card) != protocol_to_pnp_card(&(protocol)->cards); \
-	(card) = protocol_to_pnp_card((card)->protocol_list.next))
-#define protocol_for_each_dev(protocol,dev) \
-	for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
-	(dev) != protocol_to_pnp_dev(&(protocol)->devices); \
-	(dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
+#define protocol_for_each_card(protocol, card)	\
+	list_for_each_entry(card, &(protocol)->cards, protocol_list)
+#define protocol_for_each_dev(protocol, dev)	\
+	list_for_each_entry(dev, &(protocol)->devices, protocol_list)
 
 extern struct bus_type pnp_bus_type;
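
The rewritten pnp.h macros delegate to list_for_each_entry(), which never applies list_entry() to the head sentinel and is much harder to mis-open-code than the replaced for-loops. A user-space sketch of that iteration shape (the kernel's macro deduces the element type with typeof(); this demo passes it explicitly, and all struct names are invented):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Start at the first real node, stop when the cursor's member wraps
 * back to the head, and never container_of() the head itself. */
#define list_for_each_entry(pos, head, type, member)			\
	for (pos = container_of((head)->next, type, member);		\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, type, member))

struct pnp_dev_like { int id; struct list_head global_list; };

int main(void)
{
	struct pnp_dev_like a = { .id = 1 }, b = { .id = 2 };
	struct list_head head = { &b.global_list, &a.global_list };
	struct pnp_dev_like *dev;

	/* Wire up the circle by hand: head -> a -> b -> head. */
	a.global_list = (struct list_head){ &head, &b.global_list };
	b.global_list = (struct list_head){ &a.global_list, &head };

	list_for_each_entry(dev, &head, struct pnp_dev_like, global_list)
		printf("dev %d\n", dev->id);
	return 0;
}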


@@ -65,7 +65,7 @@ struct tty_buffer {
 	int read;
 	int flags;
 	/* Data points here */
-	unsigned long data[0];
+	unsigned long data[];
 };
 
 /* Values for .flags field of tty_buffer */


@@ -3,6 +3,8 @@
 #define _LINUX_VIRTIO_NET_H
 
 #include <linux/if_vlan.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>
 
 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 					bool little_endian)
 {
 	unsigned int gso_type = 0;
+	unsigned int thlen = 0;
+	unsigned int ip_proto;
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 			gso_type = SKB_GSO_TCPV4;
+			ip_proto = IPPROTO_TCP;
+			thlen = sizeof(struct tcphdr);
 			break;
 		case VIRTIO_NET_HDR_GSO_TCPV6:
 			gso_type = SKB_GSO_TCPV6;
+			ip_proto = IPPROTO_TCP;
+			thlen = sizeof(struct tcphdr);
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
 			gso_type = SKB_GSO_UDP;
+			ip_proto = IPPROTO_UDP;
+			thlen = sizeof(struct udphdr);
 			break;
 		default:
 			return -EINVAL;
@@ -57,16 +67,20 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
 		if (!skb_partial_csum_set(skb, start, off))
 			return -EINVAL;
+
+		if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
+			return -EINVAL;
 	} else {
 		/* gso packets without NEEDS_CSUM do not set transport_offset.
 		 * probe and drop if does not match one of the above types.
 		 */
 		if (gso_type && skb->network_header) {
+			struct flow_keys keys;
+
 			if (!skb->protocol)
 				virtio_net_hdr_set_proto(skb, hdr);
 retry:
-			skb_probe_transport_header(skb, -1);
-			if (!skb_transport_header_was_set(skb)) {
+			if (!skb_flow_dissect_flow_keys(skb, &keys, 0)) {
 				/* UFO does not specify ipv4 or 6: try both */
 				if (gso_type & SKB_GSO_UDP &&
 				    skb->protocol == htons(ETH_P_IP)) {
@@ -75,6 +89,12 @@ retry:
 				}
 				return -EINVAL;
 			}
+
+			if (keys.control.thoff + thlen > skb_headlen(skb) ||
+			    keys.basic.ip_proto != ip_proto)
+				return -EINVAL;
+
+			skb_set_transport_header(skb, keys.control.thoff);
 		}
 	}
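
The virtio_net change treats the device-supplied transport offset as untrusted input: the claimed offset plus the minimum transport-header length must fit within the bytes actually held, the way the hunk checks thoff + thlen against skb_headlen(). The same check in isolation (function and parameter names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 only when the claimed header lies entirely inside the
 * linear buffer; the 64-bit sum keeps a huge thoff from wrapping. */
static int transport_header_ok(uint32_t thoff, uint32_t thlen,
			       uint32_t headlen)
{
	return (uint64_t)thoff + thlen <= headlen;
}

int main(void)
{
	printf("%d\n", transport_header_ok(14 + 20, 20, 128));	  /* 1: fits */
	printf("%d\n", transport_header_ok(120, 20, 128));	  /* 0: short */
	printf("%d\n", transport_header_ok(UINT32_MAX, 20, 128)); /* 0: wrap */
	return 0;
}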


@@ -223,8 +223,10 @@ struct ipv6_stub {
 				 const struct in6_addr *addr);
 	int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
 				 const struct in6_addr *addr);
-	int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
-			       struct dst_entry **dst, struct flowi6 *fl6);
+	struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
+						  const struct sock *sk,
+						  struct flowi6 *fl6,
+						  const struct in6_addr *final_dst);
 	void (*udpv6_encap_enable)(void);
 	void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
 			      const struct in6_addr *solicited_addr,

View File

@@ -862,7 +862,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
 
 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
 		   struct flowi6 *fl6);
-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 					 const struct in6_addr *final_dst);

View File

@@ -80,7 +80,7 @@ struct nf_conn {
 	struct hlist_node	nat_bysource;
 #endif
 	/* all members below initialized via memset */
-	u8 __nfct_init_offset[0];
+	struct { } __nfct_init_offset;
 
 	/* If we were expected by an expectation, this will be it */
 	struct nf_conn *master;


@@ -76,6 +76,7 @@ struct snd_rawmidi_runtime {
 	size_t avail_min;	/* min avail for wakeup */
 	size_t avail;		/* max used buffer for wakeup */
 	size_t xruns;		/* over/underruns counter */
+	int buffer_ref;		/* buffer reference count */
 	/* misc */
 	spinlock_t lock;
 	wait_queue_head_t sleep;


@@ -706,6 +706,8 @@ asmlinkage __visible void __init start_kernel(void)
 
 	/* Do the rest non-__init'ed, we're now alive */
 	rest_init();
+
+	prevent_tail_call_optimization();
 }
 
 /* Call all constructor functions linked into the kernel. */


@@ -750,21 +750,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
 			total++;
 	}
 
-	*new_pos = pos + 1;
+	ipc = NULL;
 	if (total >= ids->in_use)
-		return NULL;
+		goto out;
 
 	for (; pos < IPCMNI; pos++) {
 		ipc = idr_find(&ids->ipcs_idr, pos);
 		if (ipc != NULL) {
 			rcu_read_lock();
 			ipc_lock_object(ipc);
-			return ipc;
+			break;
 		}
 	}
-
-	/* Out of range - return NULL to terminate iteration */
-	return NULL;
+out:
+	*new_pos = pos + 1;
+	return ipc;
 }
 
 static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)


@@ -348,11 +348,12 @@ static void put_probe_ref(void)
 
 static void blk_trace_cleanup(struct blk_trace *bt)
 {
+	synchronize_rcu();
 	blk_trace_free(bt);
 	put_probe_ref();
 }
 
-int blk_trace_remove(struct request_queue *q)
+static int __blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;
 
@@ -365,6 +366,17 @@ int blk_trace_remove(struct request_queue *q)
 
 	return 0;
 }
+
+int blk_trace_remove(struct request_queue *q)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_remove(q);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_remove);
 
 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
@@ -565,9 +577,8 @@ err:
 	return ret;
 }
 
-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
-		    struct block_device *bdev,
-		    char __user *arg)
+static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+			     struct block_device *bdev, char __user *arg)
 {
 	struct blk_user_trace_setup buts;
 	int ret;
@@ -581,11 +592,24 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 		return ret;
 
 	if (copy_to_user(arg, &buts, sizeof(buts))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 
 	return 0;
 }
+
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+		    struct block_device *bdev,
+		    char __user *arg)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_setup(q, name, dev, bdev, arg);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_setup);
 
 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
@@ -614,7 +638,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 		return ret;
 
 	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 
@@ -622,11 +646,13 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 }
 #endif
 
-int blk_trace_startstop(struct request_queue *q, int start)
+static int __blk_trace_startstop(struct request_queue *q, int start)
 {
 	int ret;
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	bt = rcu_dereference_protected(q->blk_trace,
+				       lockdep_is_held(&q->blk_trace_mutex));
 	if (bt == NULL)
 		return -EINVAL;
 
@@ -661,6 +687,17 @@ int blk_trace_startstop(struct request_queue *q, int start)
 
 	return ret;
 }
+
+int blk_trace_startstop(struct request_queue *q, int start)
+{
+	int ret;
+
+	mutex_lock(&q->blk_trace_mutex);
+	ret = __blk_trace_startstop(q, start);
+	mutex_unlock(&q->blk_trace_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(blk_trace_startstop);
 
 /*
@@ -691,7 +728,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	switch (cmd) {
 	case BLKTRACESETUP:
 		bdevname(bdev, b);
-		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
 		break;
 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
 	case BLKTRACESETUP32:
@@ -702,10 +739,10 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	case BLKTRACESTART:
 		start = 1;
 	case BLKTRACESTOP:
-		ret = blk_trace_startstop(q, start);
+		ret = __blk_trace_startstop(q, start);
 		break;
 	case BLKTRACETEARDOWN:
-		ret = blk_trace_remove(q);
+		ret = __blk_trace_remove(q);
 		break;
 	default:
 		ret = -ENOTTY;
@@ -723,18 +760,24 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  **/
 void blk_trace_shutdown(struct request_queue *q)
 {
-	if (q->blk_trace) {
-		blk_trace_startstop(q, 0);
-		blk_trace_remove(q);
+	mutex_lock(&q->blk_trace_mutex);
+	if (rcu_dereference_protected(q->blk_trace,
+				      lockdep_is_held(&q->blk_trace_mutex))) {
+		__blk_trace_startstop(q, 0);
+		__blk_trace_remove(q);
 	}
+
+	mutex_unlock(&q->blk_trace_mutex);
 }
 
 #ifdef CONFIG_BLK_CGROUP
 static union kernfs_node_id *
 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	/* We don't use the 'bt' value here except as an optimization... */
+	bt = rcu_dereference_protected(q->blk_trace, 1);
 	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
 		return NULL;
 
@@ -779,10 +822,14 @@ static void blk_add_trace_rq(struct request *rq, int error,
 			     unsigned int nr_bytes, u32 what,
 			     union kernfs_node_id *cgid)
 {
-	struct blk_trace *bt = rq->q->blk_trace;
+	struct blk_trace *bt;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(rq->q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	if (blk_rq_is_passthrough(rq))
 		what |= BLK_TC_ACT(BLK_TC_PC);
@@ -791,6 +838,7 @@ static void blk_add_trace_rq(struct request *rq, int error,
 
 	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
 			rq->cmd_flags, what, error, 0, NULL, cgid);
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_rq_insert(void *ignore,
@@ -836,13 +884,18 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 			      u32 what, int error, union kernfs_node_id *cgid)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 			bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -893,11 +946,14 @@ static void blk_add_trace_getrq(void *ignore,
 		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
 				  blk_trace_bio_get_cgid(q, bio));
 	else {
-		struct blk_trace *bt = q->blk_trace;
+		struct blk_trace *bt;
 
+		rcu_read_lock();
+		bt = rcu_dereference(q->blk_trace);
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
 					NULL, NULL);
+		rcu_read_unlock();
 	}
 }
 
@@ -910,27 +966,35 @@ static void blk_add_trace_sleeprq(void *ignore,
 		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
 				  blk_trace_bio_get_cgid(q, bio));
 	else {
-		struct blk_trace *bt = q->blk_trace;
+		struct blk_trace *bt;
 
+		rcu_read_lock();
+		bt = rcu_dereference(q->blk_trace);
 		if (bt)
 			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
 					0, 0, NULL, NULL);
+		rcu_read_unlock();
 	}
 }
 
 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
 	if (bt)
 		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 				    unsigned int depth, bool explicit)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(depth);
 		u32 what;
@@ -942,14 +1006,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 
 		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
 	}
+	rcu_read_unlock();
 }
 
 static void blk_add_trace_split(void *ignore,
 				struct request_queue *q, struct bio *bio,
 				unsigned int pdu)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
@@ -958,6 +1025,7 @@ static void blk_add_trace_split(void *ignore,
 				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
 				&rpdu, blk_trace_bio_get_cgid(q, bio));
 	}
+	rcu_read_unlock();
 }
 
 /**
@@ -977,11 +1045,15 @@ static void blk_add_trace_bio_remap(void *ignore,
 				    struct request_queue *q, struct bio *bio,
 				    dev_t dev, sector_t from)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 	struct blk_io_trace_remap r;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	r.device_from = cpu_to_be32(dev);
 	r.device_to   = cpu_to_be32(bio_dev(bio));
@@ -990,6 +1062,7 @@ static void blk_add_trace_bio_remap(void *ignore,
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
 			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
+	rcu_read_unlock();
 }
 
 /**
@@ -1010,11 +1083,15 @@ static void blk_add_trace_rq_remap(void *ignore,
 				   struct request *rq, dev_t dev,
 				   sector_t from)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 	struct blk_io_trace_remap r;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	r.device_from = cpu_to_be32(dev);
 	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
@@ -1023,6 +1100,7 @@ static void blk_add_trace_rq_remap(void *ignore,
 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
 			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
 			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+	rcu_read_unlock();
 }
 
 /**
@@ -1040,14 +1118,19 @@ void blk_add_driver_data(struct request_queue *q,
 			 struct request *rq,
 			 void *data, size_t len)
 {
-	struct blk_trace *bt = q->blk_trace;
+	struct blk_trace *bt;
 
-	if (likely(!bt))
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	if (likely(!bt)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
 				BLK_TA_DRV_DATA, 0, len, data,
 				blk_trace_request_get_cgid(q, rq));
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
 
@@ -1574,6 +1657,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 		return -EINVAL;
 
 	put_probe_ref();
+	synchronize_rcu();
 	blk_trace_free(bt);
 	return 0;
 }
@@ -1735,6 +1819,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
 	struct hd_struct *p = dev_to_part(dev);
 	struct request_queue *q;
 	struct block_device *bdev;
+	struct blk_trace *bt;
 	ssize_t ret = -ENXIO;
 
 	bdev = bdget(part_devt(p));
@@ -1747,21 +1832,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
 
 	mutex_lock(&q->blk_trace_mutex);
 
+	bt = rcu_dereference_protected(q->blk_trace,
+				       lockdep_is_held(&q->blk_trace_mutex));
 	if (attr == &dev_attr_enable) {
-		ret = sprintf(buf, "%u\n", !!q->blk_trace);
+		ret = sprintf(buf, "%u\n", !!bt);
 		goto out_unlock_bdev;
 	}
 
-	if (q->blk_trace == NULL)
+	if (bt == NULL)
 		ret = sprintf(buf, "disabled\n");
 	else if (attr == &dev_attr_act_mask)
-		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
+		ret = blk_trace_mask2str(buf, bt->act_mask);
 	else if (attr == &dev_attr_pid)
ret = sprintf(buf, "%u\n", q->blk_trace->pid); ret = sprintf(buf, "%u\n", bt->pid);
else if (attr == &dev_attr_start_lba) else if (attr == &dev_attr_start_lba)
ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); ret = sprintf(buf, "%llu\n", bt->start_lba);
else if (attr == &dev_attr_end_lba) else if (attr == &dev_attr_end_lba)
ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); ret = sprintf(buf, "%llu\n", bt->end_lba);
out_unlock_bdev: out_unlock_bdev:
mutex_unlock(&q->blk_trace_mutex); mutex_unlock(&q->blk_trace_mutex);
@ -1778,6 +1865,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
struct block_device *bdev; struct block_device *bdev;
struct request_queue *q; struct request_queue *q;
struct hd_struct *p; struct hd_struct *p;
struct blk_trace *bt;
u64 value; u64 value;
ssize_t ret = -EINVAL; ssize_t ret = -EINVAL;
@ -1808,8 +1896,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&q->blk_trace_mutex); mutex_lock(&q->blk_trace_mutex);
bt = rcu_dereference_protected(q->blk_trace,
lockdep_is_held(&q->blk_trace_mutex));
if (attr == &dev_attr_enable) { if (attr == &dev_attr_enable) {
if (!!value == !!q->blk_trace) { if (!!value == !!bt) {
ret = 0; ret = 0;
goto out_unlock_bdev; goto out_unlock_bdev;
} }
@ -1821,18 +1911,21 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
} }
ret = 0; ret = 0;
if (q->blk_trace == NULL) if (bt == NULL) {
ret = blk_trace_setup_queue(q, bdev); ret = blk_trace_setup_queue(q, bdev);
bt = rcu_dereference_protected(q->blk_trace,
lockdep_is_held(&q->blk_trace_mutex));
}
if (ret == 0) { if (ret == 0) {
if (attr == &dev_attr_act_mask) if (attr == &dev_attr_act_mask)
q->blk_trace->act_mask = value; bt->act_mask = value;
else if (attr == &dev_attr_pid) else if (attr == &dev_attr_pid)
q->blk_trace->pid = value; bt->pid = value;
else if (attr == &dev_attr_start_lba) else if (attr == &dev_attr_start_lba)
q->blk_trace->start_lba = value; bt->start_lba = value;
else if (attr == &dev_attr_end_lba) else if (attr == &dev_attr_end_lba)
q->blk_trace->end_lba = value; bt->end_lba = value;
} }
out_unlock_bdev: out_unlock_bdev:

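Every blktrace hook above gets the same conversion: q->blk_trace becomes an RCU-managed pointer, fast-path readers fetch it with rcu_dereference() inside an rcu_read_lock()/rcu_read_unlock() pair, and the sysfs show/store paths, which already hold blk_trace_mutex, use rcu_dereference_protected(). A minimal sketch of the idiom follows; the example_* names are hypothetical and this is not the blktrace code itself, only the same pattern.

/*
 * Minimal sketch of the accessor pattern used in the blktrace hunks
 * above. All example_* names are hypothetical.
 */
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct example_data {
	int value;
};

static struct example_data __rcu *example_ptr;
static DEFINE_MUTEX(example_lock);

/* Fast path: no lock held, the pointer may be cleared concurrently. */
static int example_read(void)
{
	struct example_data *d;
	int v = -1;

	rcu_read_lock();
	d = rcu_dereference(example_ptr);	/* cf. rcu_dereference(q->blk_trace) */
	if (d)
		v = d->value;
	rcu_read_unlock();			/* d must not be used past this point */

	return v;
}

/* Slow path: the update-side mutex is held, as with blk_trace_mutex. */
static void example_write(int v)
{
	struct example_data *d;

	mutex_lock(&example_lock);
	d = rcu_dereference_protected(example_ptr,
				      lockdep_is_held(&example_lock));
	if (d)
		d->value = v;
	mutex_unlock(&example_lock);
}

The matching writer-side obligation is to wait out readers before freeing, which is why blk_trace_remove_queue() above gains a synchronize_rcu() ahead of blk_trace_free().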
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c

@@ -7666,6 +7666,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	 */
 	allocate_snapshot = false;
 #endif
+
+	/*
+	 * Because of some magic with the way alloc_percpu() works on
+	 * x86_64, we need to synchronize the pgd of all the tables,
+	 * otherwise the trace events that happen in x86_64 page fault
+	 * handlers can't cope with accessing the chance that a
+	 * alloc_percpu()'d memory might be touched in the page fault trace
+	 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
+	 * calls in tracing, because something might get triggered within a
+	 * page fault trace event!
+	 */
+	vmalloc_sync_mappings();
+
 	return 0;
 }

diff --git a/kernel/umh.c b/kernel/umh.c

@@ -404,6 +404,11 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * Runs a user-space application. The application is started
  * asynchronously if wait is not set, and runs as a child of system workqueues.
  * (ie. it runs with full root capabilities and optimized affinity).
+ *
+ * Note: successful return value does not guarantee the helper was called at
+ * all. You can't rely on sub_info->{init,cleanup} being called even for
+ * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
+ * into a successful no-op.
  */
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {

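The added kernel-doc note is a real caveat for callers: a successful return does not prove the helper executed. A hedged caller sketch follows, using the long-standing call_usermodehelper() convenience wrapper (which builds the subprocess_info and calls call_usermodehelper_exec()); the helper path and function name are hypothetical.

/*
 * Caller sketch; /sbin/example-helper and run_example_helper() are
 * hypothetical, call_usermodehelper() is the real wrapper.
 */
#include <linux/kmod.h>
#include <linux/printk.h>

static int run_example_helper(void)
{
	char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	int ret;

	/*
	 * UMH_WAIT_PROC waits for the helper to exit, so ret normally
	 * reflects its exit status. Per the note above, a kernel built
	 * with STATIC_USERMODEHELPER_PATH="" still returns success here
	 * without ever exec'ing anything, so 0 is not proof the helper ran.
	 */
	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (ret)
		pr_warn("example helper failed: %d\n", ret);

	return ret;
}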
diff --git a/mm/page_alloc.c b/mm/page_alloc.c

@@ -1405,6 +1405,7 @@ void set_zone_contiguous(struct zone *zone)
 		if (!__pageblock_pfn_to_page(block_start_pfn,
 					     block_end_pfn, zone))
 			return;
+		cond_resched();
 	}
 
 	/* We confirm that there is no hole */

diff --git a/mm/shmem.c b/mm/shmem.c

@@ -2129,7 +2129,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	int retval = -ENOMEM;
 
-	spin_lock_irq(&info->lock);
+	/*
+	 * What serializes the accesses to info->flags?
+	 * ipc_lock_object() when called from shmctl_do_lock(),
+	 * no serialization needed when called from shm_destroy().
+	 */
 	if (lock && !(info->flags & VM_LOCKED)) {
 		if (!user_shm_lock(inode->i_size, user))
 			goto out_nomem;
@@ -2144,7 +2148,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 		retval = 0;
 	}
 out_nomem:
-	spin_unlock_irq(&info->lock);
 	return retval;
 }

diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c

@@ -734,7 +734,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
 	orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
 	if (!orig_node)
-		return;
+		goto out;
 
 	neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
 						     ethhdr->h_source);

diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c

@@ -1017,15 +1017,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
  */
 static u8 batadv_nc_random_weight_tq(u8 tq)
 {
-	u8 rand_val, rand_tq;
-
-	get_random_bytes(&rand_val, sizeof(rand_val));
-
 	/* randomize the estimated packet loss (max TQ - estimated TQ) */
-	rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
-
-	/* normalize the randomized packet loss */
-	rand_tq /= BATADV_TQ_MAX_VALUE;
+	u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
 
 	/* convert to (randomized) estimated tq again */
 	return BATADV_TQ_MAX_VALUE - rand_tq;

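The one-line replacement fixes two defects in the removed code: the product rand_val * (BATADV_TQ_MAX_VALUE - tq) is truncated when stored into a u8 before the divide, and the subsequent divide by BATADV_TQ_MAX_VALUE makes the distribution non-uniform. prandom_u32_max(bound) returns a uniform value in [0, bound), hence the + 1 in the new bound so every loss value from 0 through (max - tq) is reachable. A sketch of the same arithmetic, with a hypothetical EXAMPLE_TQ_MAX constant standing in for BATADV_TQ_MAX_VALUE:

/*
 * Sketch of the corrected weighting; EXAMPLE_TQ_MAX and
 * example_random_weight_tq() are hypothetical, prandom_u32_max()
 * is the real kernel helper.
 */
#include <linux/random.h>
#include <linux/types.h>

#define EXAMPLE_TQ_MAX 255

static u8 example_random_weight_tq(u8 tq)
{
	/* uniform loss in 0 .. (EXAMPLE_TQ_MAX - tq); the bound is exclusive */
	u8 rand_tq = prandom_u32_max(EXAMPLE_TQ_MAX + 1 - tq);

	/* convert back to a (randomized) estimated tq */
	return EXAMPLE_TQ_MAX - rand_tq;
}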
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c

@@ -1081,7 +1081,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
 	ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
 				      &tp_override);
 	if (!ret)
-		return count;
+		goto out;
 
 	old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
 	if (old_tp_override == tp_override)
@@ -1114,6 +1114,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
 	tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
 
+	batadv_hardif_put(hard_iface);
 	return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
 		       tp_override % 10);
 }

diff --git a/net/core/dev.c b/net/core/dev.c

@@ -7282,11 +7282,13 @@ static void netdev_sync_lower_features(struct net_device *upper,
 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
 				   &feature, lower->name);
 			lower->wanted_features &= ~feature;
-			netdev_update_features(lower);
+			__netdev_update_features(lower);
 
 			if (unlikely(lower->features & feature))
 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
 					    &feature, lower->name);
+			else
+				netdev_features_change(lower);
 		}
 	}
 }

diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c

@@ -154,6 +154,7 @@ static void sched_send_work(unsigned long _data)
 static void trace_drop_common(struct sk_buff *skb, void *location)
 {
 	struct net_dm_alert_msg *msg;
+	struct net_dm_drop_point *point;
 	struct nlmsghdr *nlh;
 	struct nlattr *nla;
 	int i;
@@ -172,11 +173,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
+	point = msg->points;
 	for (i = 0; i < msg->entries; i++) {
-		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
-			msg->points[i].count++;
+		if (!memcmp(&location, &point->pc, sizeof(void *))) {
+			point->count++;
 			goto out;
 		}
+		point++;
 	}
 	if (msg->entries == dm_hit_limit)
 		goto out;
@@ -185,8 +188,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	 */
 	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
 	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
-	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
-	msg->points[msg->entries].count = 1;
+	memcpy(point->pc, &location, sizeof(void *));
+	point->count = 1;
 	msg->entries++;
 
 	if (!timer_pending(&data->send_timer)) {

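The pointer walk replaces msg->points[i] indexing with identical logic; the change apparently works around newer compilers (gcc 10's stricter array-bounds checking) flagging indexed accesses past the zero-length points[] array declared in struct net_dm_alert_msg. A self-contained sketch of the idiom, with hypothetical example_* names:

/*
 * Sketch of the pointer-walk idiom; the example_* types are
 * hypothetical stand-ins for net_dm_alert_msg / net_dm_drop_point.
 */
#include <string.h>

struct example_point {
	void *pc;
	unsigned int count;
};

struct example_msg {
	unsigned int entries;
	struct example_point points[];	/* trailing array, as in net_dm_alert_msg */
};

/* Returns the matching point, or the slot one past the last entry. */
static struct example_point *example_find(struct example_msg *msg, void *location)
{
	struct example_point *point = msg->points;
	unsigned int i;

	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, &point->pc, sizeof(void *)))
			return point;
		point++;
	}

	return point;
}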
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c

@@ -241,6 +241,8 @@ static void net_prio_attach(struct cgroup_taskset *tset)
 	struct task_struct *p;
 	struct cgroup_subsys_state *css;
 
+	cgroup_sk_alloc_disable();
+
 	cgroup_taskset_for_each(p, css, tset) {
 		void *v = (void *)(unsigned long)css->cgroup->id;

diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c

@@ -211,7 +211,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
 	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
 	rcu_read_unlock();
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		dst = NULL;
@@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
 
 	/* sk = NULL, but it is safe for now. RST socket required. */
-	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
 		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
@@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
 		err = PTR_ERR(dst);
 		goto failure;

diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c

@@ -1272,7 +1272,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
 			return ret_val;
 		}
 
-		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+		if (secattr->attr.mls.cat)
+			secattr->flags |= NETLBL_SECATTR_MLS_CAT;
 	}
 
 	return 0;
@@ -1453,7 +1454,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
 			return ret_val;
 		}
 
-		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+		if (secattr->attr.mls.cat)
+			secattr->flags |= NETLBL_SECATTR_MLS_CAT;
 	}
 
 	return 0;

diff --git a/net/ipv4/route.c b/net/ipv4/route.c

@@ -921,7 +921,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	/* Check for load limit; set rate_last to the latest sent
 	 * redirect.
 	 */
-	if (peer->rate_tokens == 0 ||
+	if (peer->n_redirects == 0 ||
 	    time_after(jiffies,
 		       (peer->rate_last +
 			(ip_rt_redirect_load << peer->n_redirects)))) {

Some files were not shown because too many files have changed in this diff.