Merge branch 'v5.4/standard/base' into v5.4/standard/tiny/base

commit 1fa6b01026
Author: Bruce Ashfield
Date:   2024-05-13 10:21:05 -04:00

287 changed files with 3433 additions and 1769 deletions


@@ -484,11 +484,14 @@ Spectre variant 2
Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
Spectre v2 variant attacks, including cross-thread branch target injections
on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
Spectre v2 variant attacks.
Legacy IBRS systems clear the IBRS bit on exit to userspace and
therefore explicitly enable STIBP for that
On Intel's enhanced IBRS systems, this includes cross-thread branch target
injections on SMT systems (STIBP). In other words, Intel eIBRS enables
STIBP, too.
AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
the IBRS bit on exit to userspace, therefore both explicitly enable STIBP.
The retpoline mitigation is turned on by default on vulnerable
CPUs. It can be forced on or off by the administrator
@@ -622,9 +625,10 @@ kernel command line.
retpoline,generic Retpolines
retpoline,lfence LFENCE; indirect branch
retpoline,amd alias for retpoline,lfence
eibrs enhanced IBRS
eibrs,retpoline enhanced IBRS + Retpolines
eibrs,lfence enhanced IBRS + LFENCE
eibrs Enhanced/Auto IBRS
eibrs,retpoline Enhanced/Auto IBRS + Retpolines
eibrs,lfence Enhanced/Auto IBRS + LFENCE
ibrs use IBRS to protect kernel
Not specifying this option is equivalent to
spectre_v2=auto.
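
A quick way to see which of these modes a booted kernel chose is the sysfs
report populated by the x86 bugs code patched later in this merge. A minimal
userspace check (illustrative only, not part of this series):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
	char line[256];

	if (!f) {
		perror("spectre_v2");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: Enhanced / Automatic IBRS" */
	fclose(f);
	return 0;
}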


@@ -4600,9 +4600,9 @@
retpoline,generic - Retpolines
retpoline,lfence - LFENCE; indirect branch
retpoline,amd - alias for retpoline,lfence
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
eibrs - Enhanced/Auto IBRS
eibrs,retpoline - Enhanced/Auto IBRS + Retpolines
eibrs,lfence - Enhanced/Auto IBRS + LFENCE
ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 273
SUBLEVEL = 275
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@@ -205,7 +205,6 @@
};
gmac: ethernet@8000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;


@@ -19,176 +19,174 @@
device_type = "memory";
reg = <0x00000000 0x08000000>;
};
};
soc {
apb@d4000000 {
uart3: uart@d4018000 {
status = "okay";
};
twsi1: i2c@d4011000 {
status = "okay";
pmic: max8925@3c {
compatible = "maxium,max8925";
reg = <0x3c>;
interrupts = <1>;
interrupt-parent = <&intcmux4>;
interrupt-controller;
#interrupt-cells = <1>;
maxim,tsc-irq = <0>;
&uart3 {
status = "okay";
};
regulators {
SDV1 {
regulator-min-microvolt = <637500>;
regulator-max-microvolt = <1425000>;
regulator-boot-on;
regulator-always-on;
};
SDV2 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2225000>;
regulator-boot-on;
regulator-always-on;
};
SDV3 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO1 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO2 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO3 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO4 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO5 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO6 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO7 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO8 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO9 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO10 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
};
LDO11 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO12 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO13 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO14 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO15 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO16 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO17 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO18 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO19 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO20 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
};
backlight {
maxim,max8925-dual-string = <0>;
};
charger {
batt-detect = <0>;
topoff-threshold = <1>;
fast-charge = <7>;
no-temp-support = <0>;
no-insert-detect = <0>;
};
};
&twsi1 {
status = "okay";
pmic: max8925@3c {
compatible = "maxim,max8925";
reg = <0x3c>;
interrupts = <1>;
interrupt-parent = <&intcmux4>;
interrupt-controller;
#interrupt-cells = <1>;
maxim,tsc-irq = <0>;
regulators {
SDV1 {
regulator-min-microvolt = <637500>;
regulator-max-microvolt = <1425000>;
regulator-boot-on;
regulator-always-on;
};
rtc: rtc@d4010000 {
status = "okay";
SDV2 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2225000>;
regulator-boot-on;
regulator-always-on;
};
SDV3 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO1 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO2 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO3 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO4 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO5 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO6 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO7 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO8 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO9 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO10 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
};
LDO11 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO12 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO13 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO14 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO15 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO16 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO17 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO18 {
regulator-min-microvolt = <650000>;
regulator-max-microvolt = <2250000>;
regulator-boot-on;
regulator-always-on;
};
LDO19 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
LDO20 {
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3900000>;
regulator-boot-on;
regulator-always-on;
};
};
backlight {
maxim,max8925-dual-string = <0>;
};
charger {
batt-detect = <0>;
topoff-threshold = <1>;
fast-charge = <7>;
no-temp-support = <0>;
no-insert-detect = <0>;
};
};
};
&rtc {
status = "okay";
};


@@ -105,15 +105,89 @@
proc-supply = <&cpus_fixed_vproc1>;
};
&eth {
phy-mode = "rgmii-rxid";
phy-handle = <&ethernet_phy0>;
mediatek,tx-delay-ps = <1530>;
snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth_default>;
pinctrl-1 = <&eth_sleep>;
status = "okay";
mdio {
compatible = "snps,dwmac-mdio";
#address-cells = <1>;
#size-cells = <0>;
ethernet_phy0: ethernet-phy@5 {
compatible = "ethernet-phy-id0243.0d90";
reg = <0x5>;
};
};
};
&pio {
usb0_id_pins_float: usb0_iddig {
eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
<MT2712_PIN_73_GBE_TXD1__FUNC_GBE_TXD1>,
<MT2712_PIN_74_GBE_TXD0__FUNC_GBE_TXD0>,
<MT2712_PIN_75_GBE_TXC__FUNC_GBE_TXC>,
<MT2712_PIN_76_GBE_TXEN__FUNC_GBE_TXEN>;
drive-strength = <MTK_DRIVE_8mA>;
};
rx_pins {
pinmux = <MT2712_PIN_78_GBE_RXD3__FUNC_GBE_RXD3>,
<MT2712_PIN_79_GBE_RXD2__FUNC_GBE_RXD2>,
<MT2712_PIN_80_GBE_RXD1__FUNC_GBE_RXD1>,
<MT2712_PIN_81_GBE_RXD0__FUNC_GBE_RXD0>,
<MT2712_PIN_82_GBE_RXDV__FUNC_GBE_RXDV>,
<MT2712_PIN_84_GBE_RXC__FUNC_GBE_RXC>;
input-enable;
};
mdio_pins {
pinmux = <MT2712_PIN_85_GBE_MDC__FUNC_GBE_MDC>,
<MT2712_PIN_86_GBE_MDIO__FUNC_GBE_MDIO>;
drive-strength = <MTK_DRIVE_8mA>;
input-enable;
};
};
eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
<MT2712_PIN_73_GBE_TXD1__FUNC_GPIO73>,
<MT2712_PIN_74_GBE_TXD0__FUNC_GPIO74>,
<MT2712_PIN_75_GBE_TXC__FUNC_GPIO75>,
<MT2712_PIN_76_GBE_TXEN__FUNC_GPIO76>;
};
rx_pins {
pinmux = <MT2712_PIN_78_GBE_RXD3__FUNC_GPIO78>,
<MT2712_PIN_79_GBE_RXD2__FUNC_GPIO79>,
<MT2712_PIN_80_GBE_RXD1__FUNC_GPIO80>,
<MT2712_PIN_81_GBE_RXD0__FUNC_GPIO81>,
<MT2712_PIN_82_GBE_RXDV__FUNC_GPIO82>,
<MT2712_PIN_84_GBE_RXC__FUNC_GPIO84>;
input-disable;
};
mdio_pins {
pinmux = <MT2712_PIN_85_GBE_MDC__FUNC_GPIO85>,
<MT2712_PIN_86_GBE_MDIO__FUNC_GPIO86>;
input-disable;
bias-disable;
};
};
usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
usb1_id_pins_float: usb1_iddig {
usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;


@@ -249,10 +249,11 @@
#clock-cells = <1>;
};
infracfg: syscon@10001000 {
infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
pericfg: syscon@10003000 {
@@ -632,6 +633,71 @@
status = "disabled";
};
stmmac_axi_setup: stmmac-axi-config {
snps,wr_osr_lmt = <0x7>;
snps,rd_osr_lmt = <0x7>;
snps,blen = <0 0 0 0 16 8 4>;
};
mtl_rx_setup: rx-queues-config {
snps,rx-queues-to-use = <1>;
snps,rx-sched-sp;
queue0 {
snps,dcb-algorithm;
snps,map-to-dma-channel = <0x0>;
snps,priority = <0x0>;
};
};
mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <3>;
snps,tx-sched-wrr;
queue0 {
snps,weight = <0x10>;
snps,dcb-algorithm;
snps,priority = <0x0>;
};
queue1 {
snps,weight = <0x11>;
snps,dcb-algorithm;
snps,priority = <0x1>;
};
queue2 {
snps,weight = <0x12>;
snps,dcb-algorithm;
snps,priority = <0x2>;
};
};
eth: ethernet@1101c000 {
compatible = "mediatek,mt2712-gmac";
reg = <0 0x1101c000 0 0x1300>;
interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "macirq";
mac-address = [00 55 7b b5 7d f7];
clock-names = "axi",
"apb",
"mac_main",
"ptp_ref";
clocks = <&pericfg CLK_PERI_GMAC>,
<&pericfg CLK_PERI_GMAC_PCLK>,
<&topckgen CLK_TOP_ETHER_125M_SEL>,
<&topckgen CLK_TOP_ETHER_50M_SEL>;
assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
<&topckgen CLK_TOP_ETHER_50M_SEL>;
assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
<&topckgen CLK_TOP_APLL1_D3>;
power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
mediatek,pericfg = <&pericfg>;
snps,axi-config = <&stmmac_axi_setup>;
snps,mtl-rx-config = <&mtl_rx_setup>;
snps,mtl-tx-config = <&mtl_tx_setup>;
snps,txpbl = <1>;
snps,rxpbl = <1>;
clk_csr = <0>;
status = "disabled";
};
mmc0: mmc@11230000 {
compatible = "mediatek,mt2712-mmc";
reg = <0 0x11230000 0 0x1000>;


@@ -244,7 +244,7 @@
clock-names = "hif_sel";
};
cir: cir@10009000 {
cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@@ -507,7 +507,6 @@
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@@ -901,9 +900,7 @@
};
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth",
"mediatek,mt2701-eth",
"syscon";
compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,


@@ -684,11 +684,20 @@
status = "disabled";
ports {
hdmi_in: port {
#address-cells = <1>;
#size-cells = <0>;
hdmi_in: port@0 {
reg = <0>;
hdmi_in_vop: endpoint {
remote-endpoint = <&vop_out_hdmi>;
};
};
hdmi_out: port@1 {
reg = <1>;
};
};
};


@@ -425,16 +425,22 @@
gpio1830-supply = <&vcc_1v8>;
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
&pcie_clkreqn_cpm {
rockchip,pins =
<2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin>;
gpios {
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@@ -465,6 +471,15 @@
};
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
};
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable


@@ -1743,6 +1743,7 @@
hdmi: hdmi@ff940000 {
compatible = "rockchip,rk3399-dw-hdmi";
reg = <0x0 0xff940000 0x0 0x20000>;
reg-io-width = <4>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru PCLK_HDMI_CTRL>,
<&cru SCLK_HDMI_SFR>,
@@ -1751,13 +1752,16 @@
<&cru PLL_VPLL>;
clock-names = "iahb", "isfr", "cec", "grf", "vpll";
power-domains = <&power RK3399_PD_HDCP>;
reg-io-width = <4>;
rockchip,grf = <&grf>;
#sound-dai-cells = <0>;
status = "disabled";
ports {
hdmi_in: port {
#address-cells = <1>;
#size-cells = <0>;
hdmi_in: port@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1770,6 +1774,10 @@
remote-endpoint = <&vopl_out_hdmi>;
};
};
hdmi_out: port@1 {
reg = <1>;
};
};
};


@@ -42,31 +42,32 @@ extern __wsum csum_partial_copy_from_user(const void __user *src,
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
unsigned long t0, t1, t2;
__asm__ __volatile__ (
" ldws,ma 4(%1), %0\n"
" addib,<= -4, %2, 2f\n"
"\n"
" ldws 4(%1), %%r20\n"
" ldws 8(%1), %%r21\n"
" add %0, %%r20, %0\n"
" ldws,ma 12(%1), %%r19\n"
" addc %0, %%r21, %0\n"
" addc %0, %%r19, %0\n"
"1: ldws,ma 4(%1), %%r19\n"
" addib,< 0, %2, 1b\n"
" addc %0, %%r19, %0\n"
" ldws 4(%1), %4\n"
" ldws 8(%1), %5\n"
" add %0, %4, %0\n"
" ldws,ma 12(%1), %3\n"
" addc %0, %5, %0\n"
" addc %0, %3, %0\n"
"1: ldws,ma 4(%1), %3\n"
" addib,> -1, %2, 1b\n"
" addc %0, %3, %0\n"
"\n"
" extru %0, 31, 16, %%r20\n"
" extru %0, 15, 16, %%r21\n"
" addc %%r20, %%r21, %0\n"
" extru %0, 15, 16, %%r21\n"
" add %0, %%r21, %0\n"
" extru %0, 31, 16, %4\n"
" extru %0, 15, 16, %5\n"
" addc %4, %5, %0\n"
" extru %0, 15, 16, %5\n"
" add %0, %5, %0\n"
" subi -1, %0, %0\n"
"2:\n"
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (t0), "=r" (t1), "=r" (t2)
: "1" (iph), "2" (ihl)
: "r19", "r20", "r21", "memory");
: "memory");
return (__force __sum16)sum;
}
@@ -126,6 +127,10 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
__u32 len, __u8 proto,
__wsum sum)
{
unsigned long t0, t1, t2, t3;
len += proto; /* add 16-bit proto + len */
__asm__ __volatile__ (
#if BITS_PER_LONG > 32
@@ -136,20 +141,20 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
** Try to keep 4 registers with "live" values ahead of the ALU.
*/
" ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */
" ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n"/* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldd,ma 8(%1), %%r21\n" /* 2cd saddr */
" ldd,ma 8(%2), %%r22\n" /* 2cd daddr */
" add,dc %%r20, %0, %0\n"
" add,dc %%r21, %0, %0\n"
" add,dc %%r22, %0, %0\n"
" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */
" ldd,ma 8(%1), %4\n" /* get 1st saddr word */
" ldd,ma 8(%2), %5\n" /* get 1st daddr word */
" add %4, %0, %0\n"
" ldd,ma 8(%1), %6\n" /* 2nd saddr */
" ldd,ma 8(%2), %7\n" /* 2nd daddr */
" add,dc %5, %0, %0\n"
" add,dc %6, %0, %0\n"
" add,dc %7, %0, %0\n"
" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
" extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */
" depdi 0, 31, 32, %0\n" /* clear upper half */
" add %%r19, %0, %0\n" /* fold into 32-bits */
" addc 0, %0, %0\n" /* add carry */
" extrd,u %0, 31, 32, %4\n"/* copy upper half down */
" depdi 0, 31, 32, %0\n"/* clear upper half */
" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */
" addc 0, %0, %0\n" /* add final carry */
#else
@@ -158,30 +163,30 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
** Insn stream is serialized on the carry bit here too.
** result from the previous operation (eg r0 + x)
*/
" ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */
" ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n" /* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 2cd saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 2cd daddr */
" addc %%r21, %0, %0\n"
" ldw,ma 4(%1), %%r19\n" /* 3rd saddr */
" addc %%r22, %0, %0\n"
" ldw,ma 4(%2), %%r20\n" /* 3rd daddr */
" addc %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 4th saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 4th daddr */
" addc %%r21, %0, %0\n"
" addc %%r22, %0, %0\n"
" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
" ldw,ma 4(%1), %4\n" /* get 1st saddr word */
" ldw,ma 4(%2), %5\n" /* get 1st daddr word */
" add %4, %0, %0\n"
" ldw,ma 4(%1), %6\n" /* 2nd saddr */
" addc %5, %0, %0\n"
" ldw,ma 4(%2), %7\n" /* 2nd daddr */
" addc %6, %0, %0\n"
" ldw,ma 4(%1), %4\n" /* 3rd saddr */
" addc %7, %0, %0\n"
" ldw,ma 4(%2), %5\n" /* 3rd daddr */
" addc %4, %0, %0\n"
" ldw,ma 4(%1), %6\n" /* 4th saddr */
" addc %5, %0, %0\n"
" ldw,ma 4(%2), %7\n" /* 4th daddr */
" addc %6, %0, %0\n"
" addc %7, %0, %0\n"
" addc %3, %0, %0\n" /* fold in proto+len */
" addc 0, %0, %0\n" /* add carry */
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
: "r19", "r20", "r21", "r22", "memory");
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
"=r" (t0), "=r" (t1), "=r" (t2), "=r" (t3)
: "0" (sum), "1" (saddr), "2" (daddr), "3" (len)
: "memory");
return csum_fold(sum);
}
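
Both hunks above swap hard-coded scratch registers (%%r19-%%r22 plus a
clobber list) for dummy "=r" outputs (t0-t3), letting the compiler's register
allocator pick the temporaries. A minimal illustration of the same technique
on x86-64 (an assumed GCC/Clang example, not parisc code from this diff):

#include <stdio.h>

static unsigned long add3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long sum, t0;

	__asm__ ("mov %2, %1\n\t"	/* t0 = a; compiler chose t0's register */
		 "add %3, %1\n\t"	/* t0 += b */
		 "add %4, %1\n\t"	/* t0 += c */
		 "mov %1, %0"		/* sum = t0 */
		 : "=r" (sum), "=&r" (t0)	/* "&": t0 written before inputs die */
		 : "r" (a), "r" (b), "r" (c));
	return sum;
}

int main(void)
{
	printf("%lu\n", add3(1, 2, 3));	/* prints 6 */
	return 0;
}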


@@ -12,9 +12,16 @@
#ifndef __ASSEMBLY__
/* Performance Monitor Registers */
#define mfpmr(rn) ({unsigned int rval; \
asm volatile("mfpmr %0," __stringify(rn) \
asm volatile(".machine push; " \
".machine e300; " \
"mfpmr %0," __stringify(rn) ";" \
".machine pop; " \
: "=r" (rval)); rval;})
#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
#define mtpmr(rn, v) asm volatile(".machine push; " \
".machine e300; " \
"mtpmr " __stringify(rn) ",%0; " \
".machine pop; " \
: : "r" (v))
#endif /* __ASSEMBLY__ */
/* Freescale Book E Performance Monitor APU Registers */


@@ -61,6 +61,6 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
obj-$(CONFIG_PPC64) += $(obj64-y)


@@ -1542,6 +1542,7 @@ ENDPROC(cleanup_critical)
.quad .Lsie_skip - .Lsie_entry
#endif
.section .rodata, "a"
.balign 8
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
.globl sys_call_table
sys_call_table:


@@ -274,7 +274,7 @@ static int __init setup_nmi_watchdog(char *str)
if (!strncmp(str, "panic", 5))
panic_on_timeout = 1;
return 0;
return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);


@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s)
unsigned long val;
err = kstrtoul(s, 10, &val);
if (err)
return err;
vdso_enabled = val;
return 0;
if (!err)
vdso_enabled = val;
return 1;
}
__setup("vdso=", vdso_setup);
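
Both boot-parameter fixes above apply the same convention: a __setup()
handler returns 1 once it has consumed its option, while returning 0 (or an
errno) makes the kernel keep treating the string as an init or environment
argument. A hedged sketch of the pattern in kernel context (the names here
are invented for illustration):

static unsigned long example_enabled;

static int __init example_setup(char *s)
{
	unsigned long val;

	if (!kstrtoul(s, 10, &val))
		example_enabled = val;
	return 1;	/* option handled, even when parsing fails */
}
__setup("example=", example_setup);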


@@ -12,6 +12,7 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -111,7 +112,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
return readl((void __iomem *)(APIC_BASE + reg));
}
extern void native_apic_wait_icr_idle(void);
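
The switch to readl() matters because the compiler may lower a plain volatile
dereference through whatever load instruction it likes, while readl() on x86
is a single asm volatile MOV with a "memory" clobber that instruction
decoders can rely on. Roughly what readl() boils down to (a simplified
sketch modelled on asm/io.h):

static inline unsigned int readl_sketch(const volatile void *addr)
{
	unsigned int ret;

	asm volatile("movl %1, %0"
		     : "=r" (ret)
		     : "m" (*(const volatile unsigned int *)addr)
		     : "memory");
	return ret;
}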


@@ -92,8 +92,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \
REQUIRED_MASK_CHECK || \
BUILD_BUG_ON_ZERO(NCAPINTS != 21))
BUILD_BUG_ON_ZERO(NCAPINTS != 22))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -117,8 +118,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
DISABLED_MASK_CHECK || \
BUILD_BUG_ON_ZERO(NCAPINTS != 21))
BUILD_BUG_ON_ZERO(NCAPINTS != 22))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \


@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
#define NCAPINTS 21 /* N 32-bit words worth of info */
#define NCAPINTS 22 /* N 32-bit words worth of info */
#define NBUGINTS 2 /* N 32-bit bug flags */
/*
@@ -382,6 +382,8 @@
#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
/*
* BUG word(s)
*/
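
These defines pack a feature as word*32+bit, so bumping NCAPINTS from 21 to
22 (together with the REQUIRED_MASK21/DISABLED_MASK21 entries below) opens up
a whole new 32-bit word. A tiny standalone check of the encoding
(illustrative, not kernel code):

#include <stdio.h>

#define X86_FEATURE_AUTOIBRS (20*32 + 8)	/* as defined above */

int main(void)
{
	printf("word %d, bit %d\n",
	       X86_FEATURE_AUTOIBRS / 32, X86_FEATURE_AUTOIBRS % 32);
	return 0;	/* prints: word 20, bit 8 */
}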


@@ -86,6 +86,7 @@
#define DISABLED_MASK18 0
#define DISABLED_MASK19 0
#define DISABLED_MASK20 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#define DISABLED_MASK21 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_DISABLED_FEATURES_H */


@@ -379,6 +379,15 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
temp_mm_state_t temp_state;
lockdep_assert_irqs_disabled();
/*
* Make sure not to be in TLB lazy mode, as otherwise we'll end up
* with a stale address space WITHOUT being in lazy mode after
* restoring the previous mm.
*/
if (this_cpu_read(cpu_tlbstate.is_lazy))
leave_mm(smp_processor_id());
temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
switch_mm_irqs_off(NULL, mm, current);


@@ -30,6 +30,7 @@
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
@@ -38,6 +39,7 @@
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/* Intel MSRs. Some also available on other CPUs */


@@ -13,6 +13,8 @@
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <linux/frame.h>
#include <asm/unwind_hints.h>
/*
* This should be used immediately before a retpoline alternative. It tells
* objtool where the retpolines are so that it can make sense of the control
@@ -51,14 +53,18 @@
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
mov $(nr/2), reg; \
771: \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 772f; \
773: /* speculation trap */ \
UNWIND_HINT_EMPTY; \
pause; \
lfence; \
jmp 773b; \
772: \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 774f; \
775: /* speculation trap */ \
UNWIND_HINT_EMPTY; \
pause; \
lfence; \
jmp 775b; \
@@ -152,6 +158,7 @@
.endm
.macro ISSUE_UNBALANCED_RET_GUARD
ANNOTATE_INTRA_FUNCTION_CALL;
call .Lunbalanced_ret_guard_\@
int3
.Lunbalanced_ret_guard_\@:


@@ -103,6 +103,7 @@
#define REQUIRED_MASK18 0
#define REQUIRED_MASK19 0
#define REQUIRED_MASK20 0
#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#define REQUIRED_MASK21 0
#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */


@@ -101,7 +101,7 @@
".popsection\n\t"
#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE, 0)
#define UNWIND_HINT_EMPTY
#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE, 0)
#endif /* __ASSEMBLY__ */


@@ -998,11 +998,11 @@ static bool cpu_has_zenbleed_microcode(void)
u32 good_rev = 0;
switch (boot_cpu_data.x86_model) {
case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
case 0x60 ... 0x67: good_rev = 0x0860010b; break;
case 0x68 ... 0x6f: good_rev = 0x08608105; break;
case 0x70 ... 0x7f: good_rev = 0x08701032; break;
case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
case 0x60 ... 0x67: good_rev = 0x0860010c; break;
case 0x68 ... 0x6f: good_rev = 0x08608107; break;
case 0x70 ... 0x7f: good_rev = 0x08701033; break;
case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;
default:
return false;


@@ -1153,19 +1153,21 @@ spectre_v2_user_select_mitigation(void)
}
/*
* If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
* If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
* is not required.
*
* Enhanced IBRS also protects against cross-thread branch target
* Intel's Enhanced IBRS also protects against cross-thread branch target
* injection in user-mode as the IBRS bit remains always set which
* implicitly enables cross-thread protections. However, in legacy IBRS
* mode, the IBRS bit is set only on kernel entry and cleared on return
* to userspace. This disables the implicit cross-thread protection,
* so allow for STIBP to be selected in that case.
* to userspace. AMD Automatic IBRS also does not protect userspace.
* These modes therefore disable the implicit cross-thread protection,
* so allow for STIBP to be selected in those cases.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!smt_possible ||
spectre_v2_in_eibrs_mode(spectre_v2_enabled))
(spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
!boot_cpu_has(X86_FEATURE_AUTOIBRS)))
return;
/*
@@ -1187,9 +1189,9 @@ static const char * const spectre_v2_strings[] = {
[SPECTRE_V2_NONE] = "Vulnerable",
[SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
[SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
[SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
[SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
[SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
[SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
[SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
[SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
};
@@ -1258,7 +1260,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
@@ -1436,8 +1438,12 @@ static void __init spectre_v2_select_mitigation(void)
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
if (spectre_v2_in_ibrs_mode(mode)) {
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
update_spec_ctrl(x86_spec_ctrl_base);
if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
} else {
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
update_spec_ctrl(x86_spec_ctrl_base);
}
}
switch (mode) {
@@ -1521,8 +1527,8 @@ static void __init spectre_v2_select_mitigation(void)
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
* and Enhanced IBRS protect firmware too, so enable IBRS around
* firmware calls only when IBRS / Enhanced IBRS aren't otherwise
* enabled.
* firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
* otherwise enabled.
*
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
* the user might select retpoline on the kernel command line and if
@@ -2144,69 +2150,69 @@ static const char * const l1tf_vmx_states[] = {
static ssize_t l1tf_show_state(char *buf)
{
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
(l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
sched_smt_active())) {
return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation]);
return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation]);
}
return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t itlb_multihit_show_state(char *buf)
{
if (itlb_multihit_kvm_mitigation)
return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
else
return sprintf(buf, "KVM: Vulnerable\n");
return sysfs_emit(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
}
static ssize_t itlb_multihit_show_state(char *buf)
{
return sprintf(buf, "Processor vulnerable\n");
return sysfs_emit(buf, "Processor vulnerable\n");
}
#endif
static ssize_t mds_show_state(char *buf)
{
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
return sprintf(buf, "%s; SMT Host state unknown\n",
mds_strings[mds_mitigation]);
return sysfs_emit(buf, "%s; SMT Host state unknown\n",
mds_strings[mds_mitigation]);
}
if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
(mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
sched_smt_active() ? "mitigated" : "disabled"));
return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
(mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
sched_smt_active() ? "mitigated" : "disabled"));
}
return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t tsx_async_abort_show_state(char *buf)
{
if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
(taa_mitigation == TAA_MITIGATION_OFF))
return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
return sprintf(buf, "%s; SMT Host state unknown\n",
taa_strings[taa_mitigation]);
return sysfs_emit(buf, "%s; SMT Host state unknown\n",
taa_strings[taa_mitigation]);
}
return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t mmio_stale_data_show_state(char *buf)
@@ -2228,7 +2234,8 @@ static ssize_t mmio_stale_data_show_state(char *buf)
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
!boot_cpu_has(X86_FEATURE_AUTOIBRS))
return "";
switch (spectre_v2_user_stibp) {
@@ -2274,33 +2281,33 @@ static char *pbrsb_eibrs_state(void)
static ssize_t spectre_v2_show_state(char *buf)
{
if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
return sprintf(buf, "Vulnerable: LFENCE\n");
return sysfs_emit(buf, "Vulnerable: LFENCE\n");
if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
if (sched_smt_active() && unprivileged_ebpf_enabled() &&
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
return sprintf(buf, "%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
stibp_state(),
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
pbrsb_eibrs_state(),
spectre_v2_module_string());
return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
stibp_state(),
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
pbrsb_eibrs_state(),
spectre_v2_module_string());
}
static ssize_t srbds_show_state(char *buf)
{
return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t retbleed_show_state(char *buf)
{
return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}
static ssize_t gds_show_state(char *buf)
@@ -2312,26 +2319,26 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
char *buf, unsigned int bug)
{
if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");
return sysfs_emit(buf, "Not affected\n");
switch (bug) {
case X86_BUG_CPU_MELTDOWN:
if (boot_cpu_has(X86_FEATURE_PTI))
return sprintf(buf, "Mitigation: PTI\n");
return sysfs_emit(buf, "Mitigation: PTI\n");
if (hypervisor_is_type(X86_HYPER_XEN_PV))
return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
break;
case X86_BUG_SPECTRE_V1:
return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
case X86_BUG_SPECTRE_V2:
return spectre_v2_show_state(buf);
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
case X86_BUG_L1TF:
if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
@@ -2364,7 +2371,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
break;
}
return sprintf(buf, "Vulnerable\n");
return sysfs_emit(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)


@@ -1100,8 +1100,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
/* Zhaoxin Family 7 */
VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_MMIO),
@@ -1220,8 +1220,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
if (ia32_cap & ARCH_CAP_IBRS_ALL)
/*
* AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
* flag and protect from vendor-specific bugs via the whitelist.
*/
if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
!(ia32_cap & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
}
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
!(ia32_cap & ARCH_CAP_MDS_NO)) {
@@ -1283,11 +1291,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
!(ia32_cap & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
/*
* Check if CPU is vulnerable to GDS. If running in a virtual machine on
* an affected processor, the VMM may have disabled the use of GATHER by


@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
{ X86_FEATURE_VAES, X86_FEATURE_AVX },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },


@@ -2228,12 +2228,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
return -EINVAL;
b = &per_cpu(mce_banks_array, s->id)[bank];
if (!b->init)
return -ENODEV;
b->ctl = new;
mutex_lock(&mce_sysfs_mutex);
mce_restart();
mutex_unlock(&mce_sysfs_mutex);
return size;
}


@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
for (; addr < end; addr = next) {
pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
bool use_gbpage;
next = (addr & PUD_MASK) + PUD_SIZE;
if (next > end)
next = end;
/* if this is already a gbpage, this portion is already mapped */
if (pud_large(*pud))
continue;
/* Is using a gbpage allowed? */
use_gbpage = info->direct_gbpages;
/* Don't use gbpage if it maps more than the requested region. */
/* at the begining: */
use_gbpage &= ((addr & ~PUD_MASK) == 0);
/* ... or at the end: */
use_gbpage &= ((next & ~PUD_MASK) == 0);
/* Never overwrite existing mappings */
use_gbpage &= !pud_present(*pud);
if (use_gbpage) {
if (info->direct_gbpages) {
pud_t pudval;
if (pud_present(*pud))
continue;
addr &= PUD_MASK;
pudval = __pud((addr - info->offset) | info->page_flag);
set_pud(pud, pudval);
continue;


@@ -34,6 +34,7 @@
#include "pat_internal.h"
#include "mm_internal.h"
#include "../../mm/internal.h" /* is_cow_mapping() */
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
@@ -955,6 +956,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
free_memtype(paddr, paddr + size);
}
static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
pgprot_t *pgprot)
{
unsigned long prot;
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
/*
* We need the starting PFN and cachemode used for track_pfn_remap()
* that covered the whole VMA. For most mappings, we can obtain that
* information from the page tables. For COW mappings, we might now
* suddenly have anon folios mapped and follow_phys() will fail.
*
* Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
* detect the PFN. If we need the cachemode as well, we're out of luck
* for now and have to fail fork().
*/
if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
if (pgprot)
*pgprot = __pgprot(prot);
return 0;
}
if (is_cow_mapping(vma->vm_flags)) {
if (pgprot)
return -EINVAL;
*paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
return 0;
}
WARN_ON_ONCE(1);
return -EINVAL;
}
/*
* track_pfn_copy is called when vma that is covering the pfnmap gets
* copied through copy_page_range().
@@ -965,20 +998,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
int track_pfn_copy(struct vm_area_struct *vma)
{
resource_size_t paddr;
unsigned long prot;
unsigned long vma_size = vma->vm_end - vma->vm_start;
pgprot_t pgprot;
if (vma->vm_flags & VM_PAT) {
/*
* reserve the whole chunk covered by vma. We need the
* starting address and protection from pte.
*/
if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
WARN_ON_ONCE(1);
if (get_pat_info(vma, &paddr, &pgprot))
return -EINVAL;
}
pgprot = __pgprot(prot);
/* reserve the whole chunk covered by vma. */
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}
@@ -1053,7 +1079,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size)
{
resource_size_t paddr;
unsigned long prot;
if (vma && !(vma->vm_flags & VM_PAT))
return;
@@ -1061,11 +1086,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
/* free the chunk starting from pfn or the whole chunk */
paddr = (resource_size_t)pfn << PAGE_SHIFT;
if (!paddr && !size) {
if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
WARN_ON_ONCE(1);
if (get_pat_info(vma, &paddr, NULL))
return;
}
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);


@@ -28,7 +28,7 @@ void blk_rq_stat_init(struct blk_rq_stat *stat)
/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
if (!src->nr_samples)
if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
return;
dst->min = min(dst->min, src->min);
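
The new guard does double duty: dst->nr_samples + src->nr_samples <=
dst->nr_samples is true both when src contributes nothing (the old
!src->nr_samples case) and when the unsigned sum would wrap. A standalone
demonstration of the idiom (illustrative only):

#include <stdio.h>

static int skip_merge(unsigned int dst_n, unsigned int src_n)
{
	/* true when src_n == 0 or when dst_n + src_n wraps around */
	return dst_n + src_n <= dst_n;
}

int main(void)
{
	printf("%d %d %d\n",
	       skip_merge(5, 0),		/* 1: nothing to merge */
	       skip_merge(5, 7),		/* 0: normal merge */
	       skip_merge(4294967295u, 2));	/* 1: sum would overflow */
	return 0;
}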


@@ -217,7 +217,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
!strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}


@@ -382,18 +382,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
/*
* ASUS B1400CEAE hangs on resume from suspend (see
* https://bugzilla.kernel.org/show_bug.cgi?id=215742).
*/
{
.callback = init_default_s3,
.ident = "ASUS B1400CEAE",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
},
},
{},
};


@@ -2047,8 +2047,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
if (offset > buffer->data_size || read_size < sizeof(*hdr))
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
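
The added IS_ALIGNED(offset, sizeof(u32)) test rejects offsets that are not
4-byte aligned before the object is read. For power-of-two alignments the
kernel macro reduces to a mask check, roughly (userspace sketch, simplified
from the kernel definition):

#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	printf("%d %d\n",
	       IS_ALIGNED(8u, 4u),	/* 1 */
	       IS_ALIGNED(6u, 4u));	/* 0 */
	return 0;
}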


@@ -626,11 +626,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
dev_info(&pdev->dev, "ASM1166 has only six ports\n");
hpriv->saved_port_map = 0x3f;
}
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->force_port_map = 1;

View File

@@ -783,37 +783,6 @@ static const struct ata_port_info mv_port_info[] = {
},
};
static const struct pci_device_id mv_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
/* RocketRAID 1720/174x have different identifiers */
{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
/* Adaptec 1430SA */
{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
/* Marvell 7042 support */
{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
/* Highpoint RocketRAID PCIe series */
{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
{ } /* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
@@ -4307,6 +4276,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
static const struct pci_device_id mv_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
/* RocketRAID 1720/174x have different identifiers */
{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
/* Adaptec 1430SA */
{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
/* Marvell 7042 support */
{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
/* Highpoint RocketRAID PCIe series */
{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
{ } /* terminate list */
};
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
@@ -4319,6 +4318,7 @@ static struct pci_driver mv_pci_driver = {
#endif
};
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
/**
* mv_print_info - Dump key info to kernel log for perusal.
@@ -4491,7 +4491,6 @@ static void __exit mv_exit(void)
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);


@@ -1004,8 +1004,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
offset -= (idx * window_size);
idx++;
dist = ((long) (window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
dist = min(size, window_size - offset);
memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
@@ -1053,8 +1052,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
dist = min(size, window_size - offset);
memcpy_toio(dimm_mmio + offset / 4, psource, dist);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);


@@ -365,8 +365,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
enable_irq(wirq->irq);
wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
}
}
/**


@@ -226,24 +226,30 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
blk_mq_unfreeze_queue(lo->lo_queue);
}
static int
/**
* loop_set_size() - sets device size and notifies userspace
* @lo: struct loop_device to set the size for
* @size: new size of the loop device
*
* Callers must validate that the size passed into this function fits into
* a sector_t, eg using loop_validate_size()
*/
static void loop_set_size(struct loop_device *lo, loff_t size)
{
struct block_device *bdev = lo->lo_device;
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << SECTOR_SHIFT);
/* let user-space know about the new size */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
static void
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
struct block_device *bdev = lo->lo_device;
if (unlikely((loff_t)x != size))
return -EFBIG;
if (lo->lo_offset != offset)
lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
return 0;
loop_set_size(lo, size);
}
static inline int
@@ -1020,10 +1026,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
!file->f_op->write_iter)
lo_flags |= LO_FLAGS_READ_ONLY;
error = -EFBIG;
size = get_loop_size(lo, file);
if ((loff_t)(sector_t)size != size)
goto out_unlock;
error = loop_prepare_queue(lo);
if (error)
goto out_unlock;
@@ -1057,11 +1061,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
loop_sysfs_init(lo);
/* let user-space know about the new size */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
loop_set_size(lo, size);
set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
block_size(inode->i_bdev) : PAGE_SIZE);
@@ -1275,82 +1276,50 @@ static int loop_clr_fd(struct loop_device *lo)
return __loop_clr_fd(lo, false);
}
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
* @info: struct loop_info64 to configure the device with
*
* Configures the loop device parameters according to the passed
* in loop_info64 configuration.
*/
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
loop_set_status_from_info(struct loop_device *lo,
const struct loop_info64 *info)
{
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
struct block_device *bdev;
bool partscan = false;
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
return err;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
!capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto out_unlock;
}
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
}
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
err = -EINVAL;
goto out_unlock;
}
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
}
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
err = loop_release_xfer(lo);
if (err)
goto out_unfreeze;
return err;
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
if (type >= MAX_LO_CRYPT) {
err = -EINVAL;
goto out_unfreeze;
}
if (type >= MAX_LO_CRYPT)
return -EINVAL;
xfer = xfer_funcs[type];
if (xfer == NULL) {
err = -EINVAL;
goto out_unfreeze;
}
if (xfer == NULL)
return -EINVAL;
} else
xfer = NULL;
err = loop_init_xfer(lo, xfer, info);
if (err)
goto out_unfreeze;
return err;
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
/* kill_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
}
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto out_unfreeze;
}
}
/* Avoid assigning overflow values */
if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
return -EOVERFLOW;
loop_config_discard(lo);
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
@@ -1375,6 +1344,63 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->lo_key_owner = uid;
}
return 0;
}
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
struct block_device *bdev;
kuid_t uid = current_uid();
bool partscan = false;
bool size_changed = false;
err = mutex_lock_killable(&loop_ctl_mutex);
if (err)
return err;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
!capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto out_unlock;
}
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
}
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
size_changed = true;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
}
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
/* If any pages were dirtied after invalidate_bdev(), try again */
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
lo->lo_device->bd_inode->i_mapping->nrpages);
goto out_unfreeze;
}
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
if (size_changed) {
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
lo->lo_backing_file);
loop_set_size(lo, new_size);
}
loop_config_discard(lo);
/* update dio if lo_offset or transfer is changed */
__loop_update_dio(lo, lo->use_dio);
@@ -1415,11 +1441,6 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
info->lo_number = lo->lo_number;
info->lo_offset = lo->lo_offset;
info->lo_sizelimit = lo->lo_sizelimit;
/* loff_t vars have been assigned __u64 */
if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
return -EOVERFLOW;
info->lo_flags = lo->lo_flags;
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
@@ -1555,7 +1576,9 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
return 0;
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
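
Note the loop_set_size() kernel-doc above points callers at
loop_validate_size(), but no helper by that name is defined in this tree; the
check actually used (see loop_set_fd() and figure_loop_size()) is the
cast-and-compare against sector_t. A plausible shape for the helper the
comment has in mind (hypothetical, not in this diff):

static int loop_validate_size(loff_t size)
{
	/* reject sizes that do not survive the round trip through sector_t */
	if ((loff_t)(sector_t)size != size)
		return -EFBIG;
	return 0;
}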


@@ -346,7 +346,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
if (!skb || skb->len != sizeof(*ver)) {
bt_dev_err(hdev, "Intel version event size mismatch");
kfree_skb(skb);
return -EILSEQ;


@@ -360,6 +360,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |


@@ -37,7 +37,11 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
static struct hlist_head *all_lists[] = {
/* List of registered clks that use runtime PM */
static HLIST_HEAD(clk_rpm_list);
static DEFINE_MUTEX(clk_rpm_list_lock);
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@ -129,6 +134,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
/**
* clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
*
* Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
* that disabling unused clks avoids a deadlock where a device is runtime PM
* resuming/suspending and the runtime PM callback is trying to grab the
* prepare_lock for something like clk_prepare_enable() while
* clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
* PM resume/suspend the device as well.
*
* Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
* success. Otherwise the lock is released on failure.
*
* Return: 0 on success, negative errno otherwise.
*/
static int clk_pm_runtime_get_all(void)
{
int ret;
struct clk_core *core, *failed;
/*
* Grab the list lock to prevent any new clks from being registered
* or unregistered until clk_pm_runtime_put_all().
*/
mutex_lock(&clk_rpm_list_lock);
/*
* Runtime PM "get" all the devices that are needed for the clks
* currently registered. Do this without holding the prepare_lock, to
* avoid the deadlock.
*/
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
ret = clk_pm_runtime_get(core);
if (ret) {
failed = core;
pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
dev_name(failed->dev), failed->name);
goto err;
}
}
return 0;
err:
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
if (core == failed)
break;
clk_pm_runtime_put(core);
}
mutex_unlock(&clk_rpm_list_lock);
return ret;
}
/**
* clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
*
* Put the runtime PM references taken in clk_pm_runtime_get_all() and release
* the 'clk_rpm_list_lock'.
*/
static void clk_pm_runtime_put_all(void)
{
struct clk_core *core;
hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
clk_pm_runtime_put(core);
mutex_unlock(&clk_rpm_list_lock);
}
static void clk_pm_runtime_init(struct clk_core *core)
{
struct device *dev = core->dev;
if (dev && pm_runtime_enabled(dev)) {
core->rpm_enabled = true;
mutex_lock(&clk_rpm_list_lock);
hlist_add_head(&core->rpm_node, &clk_rpm_list);
mutex_unlock(&clk_rpm_list_lock);
}
}
/*** locking ***/
static void clk_prepare_lock(void)
{
@ -1237,9 +1325,6 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
if (clk_pm_runtime_get(core))
return;
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@ -1248,8 +1333,6 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
clk_pm_runtime_put(core);
}
static void clk_disable_unused_subtree(struct clk_core *core)
@ -1265,9 +1348,6 @@ static void clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
if (clk_pm_runtime_get(core))
goto unprepare_out;
flags = clk_enable_lock();
if (core->enable_count)
@ -1292,8 +1372,6 @@ static void clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
clk_pm_runtime_put(core);
unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@ -1309,12 +1387,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int clk_disable_unused(void)
{
struct clk_core *core;
int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
pr_info("clk: Disabling unused clocks\n");
ret = clk_pm_runtime_get_all();
if (ret)
return ret;
/*
* Grab the prepare lock to keep the clk topology stable while iterating
* over clks.
*/
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@ -1331,6 +1419,8 @@ static int clk_disable_unused(void)
clk_prepare_unlock();
clk_pm_runtime_put_all();
return 0;
}
late_initcall_sync(clk_disable_unused);
@ -3507,9 +3597,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@ -3719,6 +3806,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
/* Free memory allocated for a struct clk_core */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
if (core->rpm_enabled) {
mutex_lock(&clk_rpm_list_lock);
hlist_del(&core->rpm_node);
mutex_unlock(&clk_rpm_list_lock);
}
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@ -3739,6 +3842,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
kref_init(&core->ref);
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@ -3751,9 +3856,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
if (dev && pm_runtime_enabled(dev))
core->rpm_enabled = true;
core->dev = dev;
clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@ -3793,12 +3897,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
clk_core_free_parent_map(core);
fail_parents:
fail_ops:
kfree_const(core->name);
fail_name:
kfree(core);
kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@ -3878,18 +3980,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
lockdep_assert_held(&prepare_lock);
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@ -3942,7 +4032,7 @@ static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
struct hlist_head **lists;
const struct hlist_head **lists;
struct clk_core *root;
lockdep_assert_held(&prepare_lock);

View File

@ -972,6 +972,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
{ }
};
static struct clk_rcg2 pcie0_aux_clk_src = {
@ -1077,6 +1078,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
F(308570000, P_GPLL6, 3.5, 0, 0),
{ }
};
static struct clk_rcg2 sdcc1_ice_core_clk_src = {

View File

@ -3646,3 +3646,4 @@ module_exit(gcc_sdm845_exit);
MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gcc-sdm845");
MODULE_SOFTDEP("pre: rpmhpd");

View File

@ -333,6 +333,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
F(333430000, P_MMPLL1, 3.5, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
F(466800000, P_MMPLL1, 2.5, 0, 0),
{ }
};
static struct clk_rcg2 mmss_axi_clk_src = {
@ -357,6 +358,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(228570000, P_MMPLL0, 3.5, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
{ }
};
static struct clk_rcg2 ocmemnoc_clk_src = {

View File

@ -283,6 +283,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
F(291750000, P_MMPLL1, 4, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
F(466800000, P_MMPLL1, 2.5, 0, 0),
{ }
};
static struct clk_rcg2 mmss_axi_clk_src = {
@ -307,6 +308,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(291750000, P_MMPLL1, 4, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
{ }
};
static struct clk_rcg2 ocmemnoc_clk_src = {

View File

@ -139,18 +139,28 @@ static void adf_device_reset_worker(struct work_struct *work)
if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hung and we can't restart it, so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
kfree(reset_data);
if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
completion_done(&reset_data->compl))
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
}
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
/* The dev is back alive. Notify the caller if in sync mode */
if (reset_data->mode == ADF_DEV_RESET_SYNC)
complete(&reset_data->compl);
else
/*
* The dev is back alive. Notify the caller if in sync mode
*
* If the device restart takes more time than expected,
* schedule_reset() can time out and exit. This can be
* detected by calling completion_done(). In this case
* the reset_data structure needs to be freed here.
*/
if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
completion_done(&reset_data->compl))
kfree(reset_data);
else
complete(&reset_data->compl);
}
static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
@ -183,8 +193,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
ret = -EFAULT;
} else {
kfree(reset_data);
}
kfree(reset_data);
return ret;
}
return 0;

View File

@ -167,6 +167,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
/* Since IRQ may be shared, check if DMA controller is powered on */
if (status == GENMASK(31, 0))
return IRQ_NONE;
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */

View File

@ -238,7 +238,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
writel(val, pchan->base + reg);
writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@ -262,7 +262,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
writel(val, od->base + reg);
writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
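Both hunks above fix the same read-modify-write slip: the helper computes the merged value in regval but then writes the bare mask val back, silently clearing every other bit in the register. A generic sketch of the intended pattern (the helper name is illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* Read-modify-write of a 32-bit MMIO register: write back the merged
 * value, never the bare mask. */
static void mmio_update_bits(void __iomem *addr, u32 mask, bool set)
{
	u32 regval = readl(addr);

	if (set)
		regval |= mask;
	else
		regval &= ~mask;

	writel(regval, addr);
}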

View File

@ -427,7 +427,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
void *data, bool duplicates, struct list_head *head)
{
const struct efivar_operations *ops;
unsigned long variable_name_size = 1024;
unsigned long variable_name_size = 512;
efi_char16_t *variable_name;
efi_status_t status;
efi_guid_t vendor_guid;
@ -450,12 +450,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
}
/*
* Per EFI spec, the maximum storage allocated for both
* the variable name and variable data is 1024 bytes.
* A small set of old UEFI implementations reject sizes
* above a certain threshold; the lowest seen in the wild
* is 512.
*/
do {
variable_name_size = 1024;
variable_name_size = 512;
status = ops->get_next_variable(&variable_name_size,
variable_name,
@ -499,9 +500,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
break;
case EFI_NOT_FOUND:
break;
case EFI_BUFFER_TOO_SMALL:
pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
variable_name_size);
status = EFI_NOT_FOUND;
break;
default:
printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
status);
pr_warn("efivars: get_next_variable: status=%lx\n", status);
status = EFI_NOT_FOUND;
break;
}

View File

@ -54,8 +54,6 @@ struct meson_sm_firmware {
void __iomem *sm_shmem_out_base;
};
static struct meson_sm_firmware fw;
static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
unsigned int cmd_index)
{
@ -90,6 +88,7 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
/**
* meson_sm_call - generic SMC32 call to the secure-monitor
*
* @fw: Pointer to secure-monitor firmware
* @cmd_index: Index of the SMC32 function ID
* @ret: Returned value
* @arg0: SMC32 Argument 0
@ -100,15 +99,15 @@ static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
*
* Return: 0 on success, a negative value on error
*/
int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4)
int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 cmd, lret;
if (!fw.chip)
if (!fw->chip)
return -ENOENT;
cmd = meson_sm_get_cmd(fw.chip, cmd_index);
cmd = meson_sm_get_cmd(fw->chip, cmd_index);
if (!cmd)
return -EINVAL;
@ -124,6 +123,7 @@ EXPORT_SYMBOL(meson_sm_call);
/**
* meson_sm_call_read - retrieve data from secure-monitor
*
* @fw: Pointer to secure-monitor firmware
* @buffer: Buffer to store the retrieved data
* @bsize: Size of the buffer
* @cmd_index: Index of the SMC32 function ID
@ -137,22 +137,23 @@ EXPORT_SYMBOL(meson_sm_call);
* When 0 is returned there is no guarantee about the amount of
* data read, and bsize bytes are copied into the buffer.
*/
int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
unsigned int bsize, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 size;
int ret;
if (!fw.chip)
if (!fw->chip)
return -ENOENT;
if (!fw.chip->cmd_shmem_out_base)
if (!fw->chip->cmd_shmem_out_base)
return -EINVAL;
if (bsize > fw.chip->shmem_size)
if (bsize > fw->chip->shmem_size)
return -EINVAL;
if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
if (meson_sm_call(fw, cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
return -EINVAL;
if (size > bsize)
@ -164,7 +165,7 @@ int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
size = bsize;
if (buffer)
memcpy(buffer, fw.sm_shmem_out_base, size);
memcpy(buffer, fw->sm_shmem_out_base, size);
return ret;
}
@ -173,6 +174,7 @@ EXPORT_SYMBOL(meson_sm_call_read);
/**
* meson_sm_call_write - send data to secure-monitor
*
* @fw: Pointer to secure-monitor firmware
* @buffer: Buffer containing data to send
* @size: Size of the data to send
* @cmd_index: Index of the SMC32 function ID
@ -184,23 +186,24 @@ EXPORT_SYMBOL(meson_sm_call_read);
*
* Return: size of sent data on success, a negative value on error
*/
int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
unsigned int size, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 written;
if (!fw.chip)
if (!fw->chip)
return -ENOENT;
if (size > fw.chip->shmem_size)
if (size > fw->chip->shmem_size)
return -EINVAL;
if (!fw.chip->cmd_shmem_in_base)
if (!fw->chip->cmd_shmem_in_base)
return -EINVAL;
memcpy(fw.sm_shmem_in_base, buffer, size);
memcpy(fw->sm_shmem_in_base, buffer, size);
if (meson_sm_call(cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
if (meson_sm_call(fw, cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
return -EINVAL;
if (!written)
@ -210,6 +213,24 @@ int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
}
EXPORT_SYMBOL(meson_sm_call_write);
/**
* meson_sm_get - get pointer to meson_sm_firmware structure.
*
* @sm_node: Pointer to the secure-monitor Device Tree node.
*
* Return: NULL if the secure-monitor device is not ready.
*/
struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
{
struct platform_device *pdev = of_find_device_by_node(sm_node);
if (!pdev)
return NULL;
return platform_get_drvdata(pdev);
}
EXPORT_SYMBOL_GPL(meson_sm_get);
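With the firmware context moved into driver data, a consumer now resolves the secure-monitor through a phandle and passes the handle to every call. A minimal consumer sketch under that assumption (the "secure-monitor" property name, buffer size, and error handling are illustrative; SM_GET_CHIP_ID is the command serial_show() uses below):

#include <linux/firmware/meson/meson_sm.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct device_node *sm_np;
	struct meson_sm_firmware *fw;
	u8 chip_id[16];
	int ret;

	sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
	if (!sm_np)
		return -ENODEV;

	fw = meson_sm_get(sm_np);
	of_node_put(sm_np);
	if (!fw)
		return -EPROBE_DEFER;	/* secure-monitor not probed yet */

	ret = meson_sm_call_read(fw, chip_id, sizeof(chip_id),
				 SM_GET_CHIP_ID, 0, 0, 0, 0, 0);
	return ret < 0 ? ret : 0;
}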
#define SM_CHIP_ID_LENGTH 119
#define SM_CHIP_ID_OFFSET 4
#define SM_CHIP_ID_SIZE 12
@ -217,14 +238,18 @@ EXPORT_SYMBOL(meson_sm_call_write);
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
struct meson_sm_firmware *fw;
uint8_t *id_buf;
int ret;
fw = platform_get_drvdata(pdev);
id_buf = kmalloc(SM_CHIP_ID_LENGTH, GFP_KERNEL);
if (!id_buf)
return -ENOMEM;
ret = meson_sm_call_read(id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
ret = meson_sm_call_read(fw, id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
0, 0, 0, 0, 0);
if (ret < 0) {
kfree(id_buf);
@ -268,25 +293,36 @@ static const struct of_device_id meson_sm_ids[] = {
static int __init meson_sm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct meson_sm_chip *chip;
struct meson_sm_firmware *fw;
chip = of_match_device(meson_sm_ids, &pdev->dev)->data;
fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
chip = of_match_device(meson_sm_ids, dev)->data;
if (!chip)
return -EINVAL;
if (chip->cmd_shmem_in_base) {
fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
chip->shmem_size);
if (WARN_ON(!fw.sm_shmem_in_base))
fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
chip->shmem_size);
if (WARN_ON(!fw->sm_shmem_in_base))
goto out;
}
if (chip->cmd_shmem_out_base) {
fw.sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
chip->shmem_size);
if (WARN_ON(!fw.sm_shmem_out_base))
fw->sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
chip->shmem_size);
if (WARN_ON(!fw->sm_shmem_out_base))
goto out_in_base;
}
fw.chip = chip;
fw->chip = chip;
platform_set_drvdata(pdev, fw);
pr_info("secure-monitor enabled\n");
if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
@ -295,7 +331,7 @@ static int __init meson_sm_probe(struct platform_device *pdev)
return 0;
out_in_base:
iounmap(fw.sm_shmem_in_base);
iounmap(fw->sm_shmem_in_base);
out:
return -EINVAL;
}

View File

@ -1204,6 +1204,7 @@ allocate_init_user_pages_failed:
err_bo_create:
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
kfree(*mem);
err:

View File

@ -2095,6 +2095,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;
if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;
/* make sure the object fits at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;
/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;
return 0;
}
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@ -2121,20 +2152,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
(bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@ -2187,16 +2212,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
(bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@ -2210,7 +2228,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@ -2297,10 +2315,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
int r;
r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;
eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);

View File

@ -938,8 +938,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
args->num_of_nodes), GFP_KERNEL);
pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
GFP_KERNEL);
if (!pa)
return -ENOMEM;

View File

@ -51,10 +51,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
unsigned int length);
void mod_stats_update_flip(struct mod_stats *mod_stats,
unsigned long timestamp_in_ns);
unsigned long long timestamp_in_ns);
void mod_stats_update_vupdate(struct mod_stats *mod_stats,
unsigned long timestamp_in_ns);
unsigned long long timestamp_in_ns);
void mod_stats_update_freesync(struct mod_stats *mod_stats,
unsigned int v_total_min,

View File

@ -700,6 +700,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
/* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@ -768,7 +769,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@ -798,6 +798,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:

View File

@ -308,14 +308,14 @@ static int vidi_get_modes(struct drm_connector *connector)
*/
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
return -EFAULT;
return 0;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
return -ENOMEM;
return 0;
}
drm_connector_update_edid_property(connector, edid);
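(This hunk and the hdmi, imx, and vc4 hunks below follow the DRM convention that the .get_modes() hook returns a count of probed modes; a negative errno would be misinterpreted as a count, so failure paths now report 0 modes instead.)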

View File

@ -876,11 +876,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
return 0;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
return -ENODEV;
return 0;
hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",

View File

@ -145,9 +145,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_disarm_breadcrumbs(engine);
intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
if (engine->park)
engine->park(engine);

View File

@ -2992,6 +2992,9 @@ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
static void execlists_park(struct intel_engine_cs *engine)
{
del_timer(&engine->execlists.timer);
/* Reset upon idling, or we may delay the busy wakeup. */
WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)

View File

@ -63,14 +63,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
int ret;
if (!mode)
return -EINVAL;
return 0;
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret) {
drm_mode_destroy(connector->dev, mode);
return ret;
return 0;
}
drm_mode_copy(mode, &imxpd->mode);

View File

@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@ -1672,7 +1673,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@ -1758,26 +1759,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
bios->legacy.i2c_indices.crt, 1, 1);
bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
all_heads, 0);
all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
all_heads, 1);
all_heads, DCB_OUTPUT_B);
}
static int

View File

@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
static void of_fini(void *p)
{
kfree(p);
}
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
.fini = (void(*)(void *))kfree,
.fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,

View File

@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
if (refcount_inc_not_zero(&iobj->maps))
if (refcount_inc_not_zero(&iobj->maps)) {
/* read barrier to match the wmb where the refcount is set */
smp_rmb();
return iobj->map;
}
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
/* barrier to ensure the ptrs are written before refcount is set */
smp_wmb();
refcount_set(&iobj->maps, 1);
}

View File

@ -236,7 +236,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, vc4->hdmi->ddc);
cec_s_phys_addr_from_edid(vc4->hdmi->cec_adap, edid);
if (!edid)
return -ENODEV;
return 0;
vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);

View File

@ -60,7 +60,6 @@ static void vkms_release(struct drm_device *dev)
struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
platform_device_unregister(vkms->platform);
drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
drm_dev_fini(&vkms->drm);
destroy_workqueue(vkms->output.composer_workq);
@ -194,6 +193,7 @@ static void __exit vkms_exit(void)
}
drm_dev_unregister(&vkms_device->drm);
drm_atomic_helper_shutdown(&vkms_device->drm);
drm_dev_put(&vkms_device->drm);
kfree(vkms_device);

View File

@ -56,7 +56,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@ -258,7 +257,6 @@ static int __i2c_hid_command(struct i2c_client *client,
msg[1].len = data_len;
msg[1].buf = buf_recv;
msg_num = 2;
set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
if (wait)
@ -266,9 +264,6 @@ static int __i2c_hid_command(struct i2c_client *client,
ret = i2c_transfer(client->adapter, msg, msg_num);
if (data_len > 0)
clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
if (ret != msg_num)
return ret < 0 ? ret : -EIO;
@ -540,9 +535,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
return IRQ_HANDLED;
i2c_hid_get_input(ihid);
return IRQ_HANDLED;

View File

@ -935,10 +935,21 @@ static const struct i2c_device_id amc6821_id[] = {
MODULE_DEVICE_TABLE(i2c, amc6821_id);
static const struct of_device_id __maybe_unused amc6821_of_match[] = {
{
.compatible = "ti,amc6821",
.data = (void *)amc6821,
},
{ }
};
MODULE_DEVICE_TABLE(of, amc6821_of_match);
static struct i2c_driver amc6821_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "amc6821",
.of_match_table = of_match_ptr(amc6821_of_match),
},
.probe = amc6821_probe,
.id_table = amc6821_id,

View File

@ -1965,13 +1965,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
* takes place. adap->algo->master_xfer existence isn't checked.
* takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@ -2038,11 +2043,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,

View File

@ -219,7 +219,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
!mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;

View File

@ -72,6 +72,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */

View File

@ -1196,7 +1196,11 @@ static int rmi_driver_probe(struct device *dev)
}
rmi_driver_set_input_params(rmi_dev, data->input);
data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
"%s/input0", dev_name(dev));
"%s/input0", dev_name(dev));
if (!data->input->phys) {
retval = -ENOMEM;
goto err;
}
}
retval = rmi_init_functions(data);

View File

@ -3121,13 +3121,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
set_bit(i, bitmap);
}
if (err) {
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i);
its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
if (err)
its_vpe_irq_domain_free(domain, virq, i);
return err;
}

View File

@ -18,6 +18,8 @@
#include "dm.h"
#define DM_RESERVED_MAX_IOS 1024
#define DM_MAX_TARGETS 1048576
#define DM_MAX_TARGET_PARAMS 1024
struct dm_kobject_holder {
struct kobject kobj;

View File

@ -3755,7 +3755,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
goto bad;

View File

@ -1760,7 +1760,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
if (param_kernel->data_size < minimum_data_size)
if (unlikely(param_kernel->data_size < minimum_data_size) ||
unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS))
return -EINVAL;
secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
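(The new upper bound works out to DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS = 1048576 * 1024 bytes, i.e. 1 GiB, for the variable-size parameter area of a single ioctl.)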

View File

@ -4027,7 +4027,9 @@ static void raid_resume(struct dm_target *ti)
* Take this opportunity to check whether any failed
* devices are reachable again.
*/
mddev_lock_nointr(mddev);
attempt_restore_of_faulty_devices(rs);
mddev_unlock(mddev);
}
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {

View File

@ -685,8 +685,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
for (i = 0; i < size; i++) {
slot = et->table + i;
hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
kmem_cache_free(mem, ex);
cond_resched();
}
}
vfree(et->table);

View File

@ -184,7 +184,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
int dm_table_create(struct dm_table **result, fmode_t mode,
unsigned num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
struct dm_table *t;
if (num_targets > DM_MAX_TARGETS)
return -EOVERFLOW;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
@ -199,7 +204,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
if (!num_targets) {
kfree(t);
return -ENOMEM;
return -EOVERFLOW;
}
if (alloc_targets(t, num_targets)) {

View File

@ -36,6 +36,7 @@
*/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@ -6334,7 +6335,18 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
/*
* Waiting on MD_SB_CHANGE_PENDING below may deadlock,
* since md_check_recovery() is needed to clear
* the flag when using mdmon.
*/
continue;
}
wait_event_lock_irq(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);

View File

@ -760,7 +760,7 @@ static const struct video_device video_dev_template = {
/**
* vip_irq - interrupt routine
* @irq: Number of interrupt (not used, correct number is assumed)
* @vip: local data structure containing all information
* @data: local data structure containing all information
*
* check for both frame interrupts set (top and bottom).
* check FIFO overflow, but limit number of log messages after open.
@ -770,8 +770,9 @@ static const struct video_device video_dev_template = {
*
* IRQ_HANDLED, interrupt done.
*/
static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
static irqreturn_t vip_irq(int irq, void *data)
{
struct sta2x11_vip *vip = data;
unsigned int status;
status = reg_read(vip, DVP_ITS);
@ -1053,9 +1054,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
spin_lock_init(&vip->slock);
ret = request_irq(pdev->irq,
(irq_handler_t) vip_irq,
IRQF_SHARED, KBUILD_MODNAME, vip);
ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
ret = -ENODEV;

View File

@ -1517,10 +1517,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc4000_priv *priv = fe->tuner_priv;
mutex_lock(&priv->lock);
*freq = priv->freq_hz + priv->freq_offset;
if (debug) {
mutex_lock(&priv->lock);
if ((priv->cur_fw.type
& (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
u16 snr = 0;
@ -1531,8 +1531,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
return 0;
}
}
mutex_unlock(&priv->lock);
}
mutex_unlock(&priv->lock);
dprintk(1, "%s()\n", __func__);

View File

@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
dg_info->in_dg_host_queue = true;
dg_info->entry = dst_entry;
memcpy(&dg_info->msg, dg, dg_size);
dg_info->msg = *dg;
memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
dg_info->in_dg_host_queue = false;
dg_info->entry = dst_entry;
memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
dg_info->msg = *dg;
memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);

View File

@ -358,7 +358,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
idata = kmalloc(sizeof(*idata), GFP_KERNEL);
idata = kzalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
@ -511,7 +511,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->flags & MMC_BLK_IOC_DROP)
return 0;
if (idata->flags & MMC_BLK_IOC_SBC)
if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
prev_idata = idatas[i - 1];
/*
@ -873,10 +873,11 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
if ((part_type & mask) == mask) {
if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@ -891,10 +892,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
if ((part_type & mask) == mask) {
if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);

View File

@ -217,6 +217,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
else
mrq->cmd->error = -ETIMEDOUT;
/* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
host->mrq = ERR_PTR(-EBUSY);
host->cmd = NULL;
host->data = NULL;

View File

@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
0xffffffff };
};
static struct mtd_info *doclist = NULL;
@ -1666,7 +1666,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}

View File

@ -59,7 +59,7 @@
#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
( \
(cmd_dir) | \
((ran) << 19) | \
(ran) | \
((bch) << 14) | \
((short_mode) << 13) | \
(((page_size) & 0x7f) << 6) | \

View File

@ -86,9 +86,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
(sizeof(struct ubi_fm_eba) +
(ubi->peb_count * sizeof(__be32))) +
sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
((sizeof(struct ubi_fm_eba) +
sizeof(struct ubi_fm_volhdr)) *
(UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
(ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}

View File

@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
ubi_err(ubi, "LEB size too small for a volume record");
return -EINVAL;
}
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;

View File

@ -374,7 +374,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
size = io_sq->bounce_buf_ctrl.buffer_size *
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);

View File

@ -2033,12 +2033,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
if (netif_running(dev)) {
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
}
}
spin_unlock_irq(&bp->lock);

View File

@ -15971,7 +15971,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;

View File

@ -1534,8 +1534,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
int i, v;
u32 reg;
int i;
/* If we don't have any VFs, then there is nothing to reset */
if (!pf->num_alloc_vfs)
@ -1546,11 +1546,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
for (v = 0; v < pf->num_alloc_vfs; v++) {
vf = &pf->vf[v];
for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is being reset no need to trigger reset again */
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
i40e_trigger_vf_reset(&pf->vf[v], flr);
i40e_trigger_vf_reset(vf, flr);
}
/* HW requires some time to make sure it can flush the FIFO for a VF
@ -1559,14 +1558,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
* the VFs using a simple iterator that increments once that VF has
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
usleep_range(10000, 20000);
/* Check each VF in sequence, beginning with the VF that failed
* the previous check.
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
while (vf < &pf->vf[pf->num_alloc_vfs]) {
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@ -1576,7 +1574,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
*/
v++;
++vf;
}
}
@ -1586,39 +1584,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* Display a warning if at least one VF didn't manage to reset in
* time, but continue on with the operation.
*/
if (v < pf->num_alloc_vfs)
if (vf < &pf->vf[pf->num_alloc_vfs])
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
pf->vf[v].vf_id);
vf->vf_id);
usleep_range(10000, 20000);
/* Begin disabling all the rings associated with VFs, but do not wait
* between each VF.
*/
for (v = 0; v < pf->num_alloc_vfs; v++) {
for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
if (pf->vf[v].lan_vsi_idx == 0)
if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
}
/* Now that we've notified HW to disable all of the VF rings, wait
* until they finish.
*/
for (v = 0; v < pf->num_alloc_vfs; v++) {
for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
if (pf->vf[v].lan_vsi_idx == 0)
if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
}
/* Hw may need up to 50ms to finish disabling the RX queues. We
@ -1627,12 +1625,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
for (v = 0; v < pf->num_alloc_vfs; v++) {
for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
i40e_cleanup_reset_vf(&pf->vf[v]);
i40e_cleanup_reset_vf(vf);
}
i40e_flush(hw);

View File

@ -2647,6 +2647,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
spin_unlock_bh(&adapter->cloud_filter_list_lock);
}
/**
* iavf_is_tc_config_same - Compare the mqprio TC config with the
* TC config already configured on this adapter.
* @adapter: board private structure
* @mqprio_qopt: TC config received from kernel.
*
* This function compares the TC config received from the kernel
* with the config already configured on the adapter.
*
* Return: true if the configurations are the same, false otherwise.
**/
static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
struct tc_mqprio_qopt *mqprio_qopt)
{
struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
int i;
if (adapter->num_tc != mqprio_qopt->num_tc)
return false;
for (i = 0; i < adapter->num_tc; i++) {
if (ch[i].count != mqprio_qopt->count[i] ||
ch[i].offset != mqprio_qopt->offset[i])
return false;
}
return true;
}
/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
@ -2703,7 +2731,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
if (adapter->num_tc == num_tc)
if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;

View File

@ -909,7 +909,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
xs = kzalloc(sizeof(*xs), GFP_KERNEL);
algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
if (unlikely(!algo)) {
err = -ENOENT;
goto err_out;
}
xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
if (unlikely(!xs)) {
err = -ENOMEM;
goto err_out;
@ -925,14 +931,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
xs->xso.dev = adapter->netdev;
algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
if (unlikely(!algo)) {
err = -ENOENT;
goto err_xs;
}
aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
xs->aead = kzalloc(aead_len, GFP_KERNEL);
xs->aead = kzalloc(aead_len, GFP_ATOMIC);
if (unlikely(!xs->aead)) {
err = -ENOMEM;
goto err_xs;

View File

@ -114,15 +114,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
return token;
}
static int cmd_alloc_index(struct mlx5_cmd *cmd)
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
if (ret < cmd->max_reg_cmds)
if (ret < cmd->max_reg_cmds) {
clear_bit(ret, &cmd->bitmask);
ent->idx = ret;
cmd->ent_arr[ent->idx] = ent;
}
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
@ -905,7 +908,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
alloc_ret = cmd_alloc_index(cmd);
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err(dev, "failed to allocate command entry\n");
if (ent->callback) {
@ -920,15 +923,14 @@ static void cmd_work_handler(struct work_struct *work)
up(sem);
return;
}
ent->idx = alloc_ret;
} else {
ent->idx = cmd->max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->bitmask);
cmd->ent_arr[ent->idx] = ent;
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));

View File

@ -1549,8 +1549,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
/* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}

Some files were not shown because too many files have changed in this diff.