Merge branch 'v6.6/standard/base' into v6.6/standard/intel-sdk-6.6/intel-socfpga

Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>

# Conflicts:
#	drivers/firmware/stratix10-svc.c
Bruce Ashfield 2026-01-15 20:49:44 -05:00
commit 39d2d55b5f
754 changed files with 6968 additions and 4373 deletions


@ -41,7 +41,7 @@ properties:
patternProperties:
"^sdhci@[0-9a-f]+$":
type: object
$ref: mmc-controller.yaml
$ref: sdhci-common.yaml
unevaluatedProperties: false
properties:


@ -36,13 +36,13 @@ properties:
reg:
items:
- description: External local bus interface registers
- description: Data Bus Interface registers
- description: Meson designed configuration registers
- description: PCIe configuration space
reg-names:
items:
- const: elbi
- const: dbi
- const: cfg
- const: config
@ -113,7 +113,7 @@ examples:
pcie: pcie@f9800000 {
compatible = "amlogic,axg-pcie", "snps,dw-pcie";
reg = <0xf9800000 0x400000>, <0xff646000 0x2000>, <0xf9f00000 0x100000>;
reg-names = "elbi", "cfg", "config";
reg-names = "dbi", "cfg", "config";
interrupts = <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>;
clocks = <&pclk>, <&clk_port>, <&clk_phy>;
clock-names = "pclk", "port", "general";


@ -42,9 +42,10 @@ TTY Refcounting
TTY Helpers
-----------
.. kernel-doc:: include/linux/tty_port.h
:identifiers: tty_port_tty_hangup tty_port_tty_vhangup
.. kernel-doc:: drivers/tty/tty_port.c
:identifiers: tty_port_tty_hangup tty_port_tty_wakeup
:identifiers: tty_port_tty_wakeup
Modem Signals
-------------


@ -104,8 +104,10 @@ kernels go out with a handful of known regressions though, hopefully, none
of them are serious.
Once a stable release is made, its ongoing maintenance is passed off to the
"stable team," currently Greg Kroah-Hartman. The stable team will release
occasional updates to the stable release using the 5.x.y numbering scheme.
"stable team," currently consists of Greg Kroah-Hartman and Sasha Levin. The
stable team will release occasional updates to the stable release using the
5.x.y numbering scheme.
To be considered for an update release, a patch must (1) fix a significant
bug, and (2) already be merged into the mainline for the next development
kernel. Kernels will typically receive stable updates for a little more


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 119
SUBLEVEL = 120
EXTRAVERSION =
NAME = Pinguïn Aangedreven


@ -568,7 +568,7 @@
AT91_XDMAC_DT_PER_IF(1) |
AT91_XDMAC_DT_PERID(12))>;
dma-names = "tx", "rx";
atmel,fifo-size = <16>;
atmel,fifo-size = <32>;
status = "disabled";
};
@ -639,7 +639,7 @@
AT91_XDMAC_DT_PER_IF(1) |
AT91_XDMAC_DT_PERID(14))>;
dma-names = "tx", "rx";
atmel,fifo-size = <16>;
atmel,fifo-size = <32>;
status = "disabled";
};
@ -851,7 +851,7 @@
AT91_XDMAC_DT_PER_IF(1) |
AT91_XDMAC_DT_PERID(16))>;
dma-names = "tx", "rx";
atmel,fifo-size = <16>;
atmel,fifo-size = <32>;
status = "disabled";
};
@ -922,7 +922,7 @@
AT91_XDMAC_DT_PER_IF(1) |
AT91_XDMAC_DT_PERID(18))>;
dma-names = "tx", "rx";
atmel,fifo-size = <16>;
atmel,fifo-size = <32>;
status = "disabled";
};
@ -994,7 +994,7 @@
AT91_XDMAC_DT_PER_IF(1) |
AT91_XDMAC_DT_PERID(20))>;
dma-names = "tx", "rx";
atmel,fifo-size = <16>;
atmel,fifo-size = <32>;
status = "disabled";
};


@ -811,7 +811,7 @@
dma-names = "tx", "rx";
atmel,use-dma-rx;
atmel,use-dma-tx;
atmel,fifo-size = <16>;
atmel,fifo-size = <32>;
status = "disabled";
};
};
@ -837,7 +837,7 @@
dma-names = "tx", "rx";
atmel,use-dma-rx;
atmel,use-dma-tx;
atmel,fifo-size = <16>;
atmel,fifo-size = <32>;
status = "disabled";
};
};


@ -355,7 +355,6 @@
port@3 {
reg = <3>;
adv7180_out: endpoint {
bus-width = <8>;
remote-endpoint = <&vin1ep>;
};
};


@ -126,8 +126,6 @@
&switch {
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pins_eth3>, <&pins_eth4>, <&pins_mdio1>;


@ -815,6 +815,7 @@
#size-cells = <0>;
non-removable;
cap-power-off-card;
bus-width = <4>;
mmc-pwrseq = <&wlan_pwrseq>;
vmmc-supply = <&vtf_reg>;


@ -518,6 +518,7 @@
#size-cells = <0>;
non-removable;
cap-power-off-card;
bus-width = <4>;
mmc-pwrseq = <&wlan_pwrseq>;
vmmc-supply = <&tflash_reg>;


@ -610,6 +610,7 @@
#size-cells = <0>;
non-removable;
cap-power-off-card;
bus-width = <4>;
mmc-pwrseq = <&wlan_pwrseq>;
vmmc-supply = <&ldo5_reg>;


@ -1440,6 +1440,7 @@
#address-cells = <1>;
#size-cells = <0>;
non-removable;
cap-power-off-card;
bus-width = <4>;
mmc-pwrseq = <&wlan_pwrseq>;


@ -185,13 +185,13 @@
interrupt-parent = <&gpioi>;
vio-supply = <&v3v3>;
vcc-supply = <&v3v3>;
st,sample-time = <4>;
st,mod-12b = <1>;
st,ref-sel = <0>;
st,adc-freq = <1>;
touchscreen {
compatible = "st,stmpe-ts";
st,sample-time = <4>;
st,mod-12b = <1>;
st,ref-sel = <0>;
st,adc-freq = <1>;
st,ave-ctrl = <1>;
st,touch-det-delay = <2>;
st,settling = <2>;


@ -222,10 +222,10 @@
"ModeA1",
"ModeA2",
"ModeA3",
"NC",
"NC",
"NC",
"NC",
"ModeB0",
"ModeB1",
"ModeB2",
"ModeB3",
"NC",
"NC",
"NC",


@ -291,7 +291,7 @@
};
twl_power: power {
compatible = "ti,twl4030-power-beagleboard-xm", "ti,twl4030-power-idle-osc-off";
compatible = "ti,twl4030-power-idle-osc-off";
ti,use_poweroff;
};
};


@ -508,7 +508,7 @@
};
twl_power: power {
compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
compatible = "ti,twl4030-power-idle-osc-off";
ti,use_poweroff;
};
};


@ -66,7 +66,7 @@ static inline unsigned long find_zero(unsigned long mask)
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, offset;
unsigned long ret, tmp;
/* Load word from unaligned pointer addr */
asm(
@ -74,9 +74,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
"2:\n"
" .pushsection .text.fixup,\"ax\"\n"
" .align 2\n"
"3: and %1, %2, #0x3\n"
" bic %2, %2, #0x3\n"
" ldr %0, [%2]\n"
"3: bic %1, %2, #0x3\n"
" ldr %0, [%1]\n"
" and %1, %2, #0x3\n"
" lsl %1, %1, #0x3\n"
#ifndef __ARMEB__
" lsr %0, %0, %1\n"
@ -89,7 +89,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
" .align 3\n"
" .long 1b, 3b\n"
" .popsection"
: "=&r" (ret), "=&r" (offset)
: "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Qo" (*(unsigned long *)addr));
return ret;


@ -337,17 +337,6 @@
>;
};
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
>;
};
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190


@ -373,13 +373,6 @@
status = "okay";
};
/* off-board header */
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
};
/* console */
&uart2 {
pinctrl-names = "default";
@ -387,25 +380,6 @@
status = "okay";
};
/* off-board header */
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
status = "okay";
};
/* off-board */
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
bus-width = <4>;
non-removable;
status = "okay";
bus-width = <4>;
non-removable;
status = "okay";
};
/* eMMC */
&usdhc3 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
@ -499,13 +473,6 @@
>;
};
pinctrl_uart1: uart1grp {
fsl,pins = <
MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x140
MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x140
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x140
@ -513,24 +480,6 @@
>;
};
pinctrl_uart3: uart3grp {
fsl,pins = <
MX8MP_IOMUXC_UART3_RXD__UART3_DCE_RX 0x140
MX8MP_IOMUXC_UART3_TXD__UART3_DCE_TX 0x140
>;
};
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x190
MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d0
MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d0
MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d0
MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d0
MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d0
>;
};
pinctrl_usdhc3: usdhc3grp {
fsl,pins = <
MX8MP_IOMUXC_NAND_WE_B__USDHC3_CLK 0x190


@ -310,17 +310,6 @@
>;
};
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
MX8MP_IOMUXC_SD1_CLK__USDHC1_CLK 0x190
MX8MP_IOMUXC_SD1_CMD__USDHC1_CMD 0x1d0
MX8MP_IOMUXC_SD1_DATA0__USDHC1_DATA0 0x1d0
MX8MP_IOMUXC_SD1_DATA1__USDHC1_DATA1 0x1d0
MX8MP_IOMUXC_SD1_DATA2__USDHC1_DATA2 0x1d0
MX8MP_IOMUXC_SD1_DATA3__USDHC1_DATA3 0x1d0
>;
};
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
MX8MP_IOMUXC_SD2_CLK__USDHC2_CLK 0x190


@ -3432,6 +3432,9 @@
<&gcc GCC_USB20_MASTER_CLK>;
assigned-clock-rates = <19200000>, <60000000>;
interconnects = <&pnoc MASTER_USB_HS &bimc SLAVE_EBI_CH0>,
<&bimc MASTER_AMPSS_M0 &pnoc SLAVE_USB_HS>;
interconnect-names = "usb-ddr", "apps-usb";
power-domains = <&gcc USB30_GDSC>;
qcom,select-utmi-as-pipe-clk;
status = "disabled";


@ -780,8 +780,8 @@
bias-disable;
};
tri_state_key_default: tri-state-key-default-state {
pins = "gpio40", "gpio42", "gpio26";
alert_slider_default: alert-slider-default-state {
pins = "gpio126", "gpio52", "gpio24";
function = "gpio";
drive-strength = <2>;
bias-disable;


@ -184,6 +184,13 @@
regulator-off-in-suspend;
};
};
eeprom: eeprom@50 {
compatible = "belling,bl24c16a", "atmel,24c16";
reg = <0x50>;
pagesize = <16>;
vcc-supply = <&vcc_3v3_pmu>;
};
};
&i2c2 {
@ -205,12 +212,6 @@
regulator-off-in-suspend;
};
};
eeprom: eeprom@50 {
compatible = "belling,bl24c16a", "atmel,24c16";
reg = <0x50>;
pagesize = <16>;
};
};
&i2c3 {
@ -518,7 +519,7 @@
};
};
vcc_3v3_s3: dcdc-reg8 {
vcc_3v3_pmu: vcc_3v3_s3: dcdc-reg8 {
regulator-name = "vcc_3v3_s3";
regulator-always-on;
regulator-boot-on;


@ -59,7 +59,7 @@
<0x00 0x01000000 0x00 0x01000000 0x00 0x01b28400>, /* First peripheral window */
<0x00 0x08000000 0x00 0x08000000 0x00 0x00200000>, /* Main CPSW */
<0x00 0x0e000000 0x00 0x0e000000 0x00 0x01d20000>, /* Second peripheral window */
<0x00 0x0fd00000 0x00 0x0fd00000 0x00 0x00020000>, /* GPU */
<0x00 0x0fd80000 0x00 0x0fd80000 0x00 0x00080000>, /* GPU */
<0x00 0x20000000 0x00 0x20000000 0x00 0x0a008000>, /* Third peripheral window */
<0x00 0x30040000 0x00 0x30040000 0x00 0x00080000>, /* PRUSS-M */
<0x00 0x30101000 0x00 0x30101000 0x00 0x00010100>, /* CSI window */


@ -571,6 +571,12 @@
J721E_IOPAD(0x234, PIN_INPUT, 7) /* (U3) EXT_REFCLK1.GPIO1_12 */
>;
};
vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
pinctrl-single,pins = <
J721E_IOPAD(0x1dc, PIN_OUTPUT, 7) /* (Y1) SPI1_CLK.GPIO0_118 */
>;
};
};
&wkup_pmx0 {
@ -626,12 +632,6 @@
>;
};
vdd_sd_dv_pins_default: vdd-sd-dv-default-pins {
pinctrl-single,pins = <
J721E_IOPAD(0x1dc, PIN_OUTPUT, 7) /* (Y1) SPI1_CLK.GPIO0_118 */
>;
};
wkup_uart0_pins_default: wkup-uart0-default-pins {
pinctrl-single,pins = <
J721E_WKUP_IOPAD(0xa0, PIN_INPUT, 0) /* (J29) WKUP_UART0_RXD */


@ -666,7 +666,7 @@ static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
return;
if (capable(CAP_SYS_ADMIN))
if (ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
return;
if (supports_clearbhb(SCOPE_SYSTEM)) {


@ -285,9 +285,9 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __swp_offset(x) ((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define __swp_entry_to_pte(x) __pte((x).val)
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
#define __swp_entry_to_pmd(x) __pmd((x).val | _PAGE_HUGE)
static inline int pte_swp_exclusive(pte_t pte)
{


@ -136,6 +136,28 @@ void kexec_reboot(void)
BUG();
}
static void machine_kexec_mask_interrupts(void)
{
unsigned int i;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
struct irq_chip *chip;
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
chip->irq_eoi(&desc->irq_data);
if (chip->irq_mask)
chip->irq_mask(&desc->irq_data);
if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
chip->irq_disable(&desc->irq_data);
}
}
#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *regs)
@ -249,6 +271,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
#ifdef CONFIG_SMP
crash_smp_send_stop();
#endif
machine_kexec_mask_interrupts();
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
pr_info("Starting crashdump kernel...\n");
@ -286,6 +309,7 @@ void machine_kexec(struct kimage *image)
/* We do not want to be bothered. */
local_irq_disable();
machine_kexec_mask_interrupts();
pr_notice("EFI boot flag 0x%lx\n", efi_boot);
pr_notice("Command line at 0x%lx\n", cmdline_ptr);


@ -93,7 +93,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
* at the callsite, so there is no need to restore the T series regs.
*/
ftrace_common_return:
PTR_L ra, sp, PT_R1
PTR_L a0, sp, PT_R4
PTR_L a1, sp, PT_R5
PTR_L a2, sp, PT_R6
@ -103,12 +102,17 @@ ftrace_common_return:
PTR_L a6, sp, PT_R10
PTR_L a7, sp, PT_R11
PTR_L fp, sp, PT_R22
PTR_L t0, sp, PT_ERA
PTR_L t1, sp, PT_R13
PTR_ADDI sp, sp, PT_SIZE
bnez t1, .Ldirect
PTR_L ra, sp, PT_R1
PTR_L t0, sp, PT_ERA
PTR_ADDI sp, sp, PT_SIZE
jr t0
.Ldirect:
PTR_L t0, sp, PT_R1
PTR_L ra, sp, PT_ERA
PTR_ADDI sp, sp, PT_SIZE
jr t1
SYM_CODE_END(ftrace_common)
@ -155,6 +159,8 @@ SYM_CODE_END(return_to_handler)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
SYM_CODE_START(ftrace_stub_direct_tramp)
jr t0
move t1, ra
move ra, t0
jr t1
SYM_CODE_END(ftrace_stub_direct_tramp)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */


@ -141,7 +141,7 @@ static inline void __init *determine_relocation_address(void)
if (kaslr_disabled())
return destination;
kernel_length = (long)_end - (long)_text;
kernel_length = (unsigned long)_end - (unsigned long)_text;
random_offset = get_random_boot() << 16;
random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
@ -190,7 +190,7 @@ unsigned long __init relocate_kernel(void)
early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
kernel_length = (unsigned long)(_end) - (unsigned long)(_text);
/* Copy the kernel to it's new location */
memcpy(location_new, _text, kernel_length);


@ -56,6 +56,7 @@
#define SMBIOS_FREQLOW_MASK 0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25
#define SMBIOS_THREAD_PACKAGE_2_OFFSET 0x2E
#define LOONGSON_EFI_ENABLE (1 << 3)
#ifdef CONFIG_EFI
@ -130,7 +131,12 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
loongson_sysconf.cores_per_package = *(u8 *)(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
if (dm->length >= 0x30 && loongson_sysconf.cores_per_package == 0xff) {
/* SMBIOS 3.0+ has ThreadCount2 for more than 255 threads */
loongson_sysconf.cores_per_package =
*(u16 *)(dmi_data + SMBIOS_THREAD_PACKAGE_2_OFFSET);
}
pr_info("CpuClock = %llu\n", cpu_clock_freq);
}


@ -25,8 +25,8 @@ SYM_FUNC_START(__switch_to)
stptr.d a4, a0, THREAD_SCHED_CFA
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
la t7, __stack_chk_guard
LONG_L t8, a1, TASK_STACK_CANARY
LONG_S t8, t7, 0
ldptr.d t8, a1, TASK_STACK_CANARY
stptr.d t8, t7, 0
#endif
move tp, a2
cpu_restore_nonscratch a1


@ -226,6 +226,8 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn)
* goto out;
*/
tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0];
emit_zext_32(ctx, a2, true);
off = offsetof(struct bpf_array, map.max_entries);
emit_insn(ctx, ldwu, t1, a1, off);
/* bgeu $a2, $t1, jmp_offset */
@ -832,6 +834,22 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
if (ret < 0)
return ret;
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
const struct btf_func_model *m;
int i;
m = bpf_jit_find_kfunc_model(ctx->prog, insn);
if (!m)
return -EINVAL;
for (i = 0; i < m->nr_args; i++) {
u8 reg = regmap[BPF_REG_1 + i];
bool sign = m->arg_flags[i] & BTF_FMODEL_SIGNED_ARG;
emit_abi_ext(ctx, reg, m->arg_size[i], sign);
}
}
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);


@ -87,6 +87,32 @@ static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, boo
emit_insn(ctx, addiw, reg, reg, 0);
}
/* Emit proper extension according to ABI requirements.
* Note that it requires a value of size `size` already resides in register `reg`.
*/
static inline void emit_abi_ext(struct jit_ctx *ctx, int reg, u8 size, bool sign)
{
/* ABI requires unsigned char/short to be zero-extended */
if (!sign && (size == 1 || size == 2))
return;
switch (size) {
case 1:
emit_insn(ctx, extwb, reg, reg);
break;
case 2:
emit_insn(ctx, extwh, reg, reg);
break;
case 4:
emit_insn(ctx, addiw, reg, reg, 0);
break;
case 8:
break;
default:
pr_warn("bpf_jit: invalid size %d for extension\n", size);
}
}
static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
{
u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;


@ -15,6 +15,7 @@
#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
#define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36
#define PCI_DEVICE_ID_LOONGSON_DC3 0x7a46
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val)
@ -98,3 +99,4 @@ static void pci_fixup_vgadev(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC3, pci_fixup_vgadev);


@ -373,7 +373,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
gio_dev->resource.flags = IORESOURCE_MEM;
gio_dev->irq = irq;
dev_set_name(&gio_dev->dev, "%d", slotno);
gio_device_register(gio_dev);
if (gio_device_register(gio_dev))
gio_dev_put(gio_dev);
} else
printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
}


@ -258,6 +258,8 @@ int main(void)
BLANK();
DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
DEFINE(TIF_32BIT_PA_BIT, 31-TIF_32BIT);
BLANK();
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);


@ -1059,8 +1059,6 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
STREG %r17, PT_IOR(%r29)
#if defined(CONFIG_64BIT)
b,n intr_save2
skip_save_ior:
/* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
@ -1069,10 +1067,17 @@ skip_save_ior:
bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
/* adjust iasq/iaoq */
/* adjust iasq0/iaoq0 */
space_adjust %r16,%r17,%r1
STREG %r16, PT_IASQ0(%r29)
STREG %r17, PT_IAOQ0(%r29)
LDREG PT_IASQ1(%r29), %r16
LDREG PT_IAOQ1(%r29), %r17
/* adjust iasq1/iaoq1 */
space_adjust %r16,%r17,%r1
STREG %r16, PT_IASQ1(%r29)
STREG %r17, PT_IAOQ1(%r29)
#else
skip_save_ior:
#endif
@ -1841,6 +1846,10 @@ syscall_restore_rfi:
extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
depi -1,7,1,%r20 /* T bit */
#ifdef CONFIG_64BIT
extru,<> %r19,TIF_32BIT_PA_BIT,1,%r0
depi -1,4,1,%r20 /* W bit */
#endif
STREG %r20,TASK_PT_PSW(%r1)
/* Always store space registers, since sr3 can be changed (e.g. fork) */
@ -1854,7 +1863,6 @@ syscall_restore_rfi:
STREG %r25,TASK_PT_IASQ0(%r1)
STREG %r25,TASK_PT_IASQ1(%r1)
/* XXX W bit??? */
/* Now if old D bit is clear, it means we didn't save all registers
* on syscall entry, so do that now. This only happens on TRACEME
* calls, or if someone attached to us while we were on a syscall.


@ -68,8 +68,8 @@ static int e_class = ELFCLASS32;
#define PUT_16BE(off, v)(buf[off] = ((v) >> 8) & 0xff, \
buf[(off) + 1] = (v) & 0xff)
#define PUT_32BE(off, v)(PUT_16BE((off), (v) >> 16L), PUT_16BE((off) + 2, (v)))
#define PUT_64BE(off, v)((PUT_32BE((off), (v) >> 32L), \
PUT_32BE((off) + 4, (v))))
#define PUT_64BE(off, v)((PUT_32BE((off), (unsigned long long)(v) >> 32L), \
PUT_32BE((off) + 4, (unsigned long long)(v))))
#define GET_16LE(off) ((buf[off]) + (buf[(off)+1] << 8))
#define GET_32LE(off) (GET_16LE(off) + (GET_16LE((off)+2U) << 16U))
@ -78,7 +78,8 @@ static int e_class = ELFCLASS32;
#define PUT_16LE(off, v) (buf[off] = (v) & 0xff, \
buf[(off) + 1] = ((v) >> 8) & 0xff)
#define PUT_32LE(off, v) (PUT_16LE((off), (v)), PUT_16LE((off) + 2, (v) >> 16L))
#define PUT_64LE(off, v) (PUT_32LE((off), (v)), PUT_32LE((off) + 4, (v) >> 32L))
#define PUT_64LE(off, v) (PUT_32LE((off), (unsigned long long)(v)), \
PUT_32LE((off) + 4, (unsigned long long)(v) >> 32L))
#define GET_16(off) (e_data == ELFDATA2MSB ? GET_16BE(off) : GET_16LE(off))
#define GET_32(off) (e_data == ELFDATA2MSB ? GET_32BE(off) : GET_32LE(off))


@ -11,6 +11,7 @@
void hash__flush_tlb_mm(struct mm_struct *mm);
void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end);
void hash__flush_gather(struct mmu_gather *tlb);
#ifdef CONFIG_SMP
void _tlbie(unsigned long address);
@ -29,7 +30,9 @@ void _tlbia(void);
static inline void tlb_flush(struct mmu_gather *tlb)
{
/* 603 needs to flush the whole TLB here since it doesn't use a hash table. */
if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
hash__flush_gather(tlb);
else
_tlbia();
}


@ -524,7 +524,6 @@ void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);
extern void slb_vmalloc_update(void);
void preload_new_slb_context(unsigned long start, unsigned long sp);
#ifdef CONFIG_PPC_64S_HASH_MMU
void slb_set_size(u16 size);


@ -15,10 +15,19 @@
#define ARCH_FUNC_PREFIX "."
#endif
#ifdef CONFIG_KFENCE
extern bool kfence_disabled;
static inline void disable_kfence(void)
{
kfence_disabled = true;
}
static inline bool arch_kfence_init_pool(void)
{
return true;
return !kfence_disabled;
}
#endif
#ifdef CONFIG_PPC64
static inline bool kfence_protect_page(unsigned long addr, bool protect)


@ -269,10 +269,9 @@ interrupt_return:
mtspr SPRN_SRR1,r12
BEGIN_FTR_SECTION
lwarx r0,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
lwarx r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
lwz r3,_CCR(r1)
lwz r4,_LINK(r1)
@ -315,10 +314,9 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
mtspr SPRN_SRR1,r12
BEGIN_FTR_SECTION
lwarx r0,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
lwarx r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
lwz r3,_LINK(r1)
lwz r4,_CTR(r1)


@ -1882,8 +1882,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
return 0;
}
void preload_new_slb_context(unsigned long start, unsigned long sp);
/*
* Set up a thread for executing a new program
*/
@ -1891,9 +1889,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
preload_new_slb_context(start, sp);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM


@ -200,6 +200,23 @@ static void kexec_prepare_cpus_wait(int wait_state)
mb();
}
/*
* The add_cpu() call in wake_offline_cpus() can fail as cpu_bootable()
* returns false for CPUs that fail the cpu_smt_thread_allowed() check
* or non primary threads if SMT is disabled. Re-enable SMT and set the
* number of SMT threads to threads per core.
*/
static void kexec_smt_reenable(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
lock_device_hotplug();
cpu_smt_num_threads = threads_per_core;
cpu_smt_control = CPU_SMT_ENABLED;
unlock_device_hotplug();
#endif
}
/*
* We need to make sure each present CPU is online. The next kernel will scan
* the device tree and assume primary threads are online and query secondary
@ -214,6 +231,8 @@ static void wake_offline_cpus(void)
{
int cpu = 0;
kexec_smt_reenable();
for_each_present_cpu(cpu) {
if (!cpu_online(cpu)) {
printk(KERN_INFO "kexec: Waking offline cpu %d.\n",


@ -105,3 +105,12 @@ void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(hash__flush_tlb_page);
void hash__flush_gather(struct mmu_gather *tlb)
{
if (tlb->fullmm || tlb->need_flush_all)
hash__flush_tlb_mm(tlb->mm);
else
hash__flush_range(tlb->mm, tlb->start, tlb->end);
}
EXPORT_SYMBOL(hash__flush_gather);


@ -1032,11 +1032,14 @@ static void __init htab_initialize(void)
unsigned long table;
unsigned long pteg_count;
unsigned long prot;
phys_addr_t base = 0, size = 0, end;
phys_addr_t base = 0, size = 0, end, limit = MEMBLOCK_ALLOC_ANYWHERE;
u64 i;
DBG(" -> htab_initialize()\n");
if (firmware_has_feature(FW_FEATURE_LPAR))
limit = ppc64_rma_size;
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
mmu_kernel_ssize = MMU_SEGSIZE_1T;
mmu_highuser_ssize = MMU_SEGSIZE_1T;
@ -1052,7 +1055,7 @@ static void __init htab_initialize(void)
// Too early to use nr_cpu_ids, so use NR_CPUS
tmp = memblock_phys_alloc_range(sizeof(struct stress_hpt_struct) * NR_CPUS,
__alignof__(struct stress_hpt_struct),
0, MEMBLOCK_ALLOC_ANYWHERE);
MEMBLOCK_LOW_LIMIT, limit);
memset((void *)tmp, 0xff, sizeof(struct stress_hpt_struct) * NR_CPUS);
stress_hpt_struct = __va(tmp);
@ -1086,7 +1089,6 @@ static void __init htab_initialize(void)
mmu_hash_ops.hpte_clear_all();
#endif
} else {
unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
#ifdef CONFIG_PPC_CELL
/*
@ -1102,7 +1104,7 @@ static void __init htab_initialize(void)
table = memblock_phys_alloc_range(htab_size_bytes,
htab_size_bytes,
0, limit);
MEMBLOCK_LOW_LIMIT, limit);
if (!table)
panic("ERROR: Failed to allocate %pa bytes below %pa\n",
&htab_size_bytes, &limit);


@ -24,8 +24,6 @@ static inline bool stress_hpt(void)
void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
void slb_setup_new_exec(void);
void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
#endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */


@ -150,8 +150,6 @@ static int hash__init_new_context(struct mm_struct *mm)
void hash__setup_new_exec(void)
{
slice_setup_new_exec();
slb_setup_new_exec();
}
#else
static inline int hash__init_new_context(struct mm_struct *mm)


@ -17,6 +17,7 @@
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>
#include <linux/kfence.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@ -31,6 +32,7 @@
#include <asm/uaccess.h>
#include <asm/ultravisor.h>
#include <asm/set_memory.h>
#include <asm/kfence.h>
#include <trace/events/thp.h>
@ -293,7 +295,8 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
static int __meminit create_physical_mapping(unsigned long start,
unsigned long end,
int nid, pgprot_t _prot)
int nid, pgprot_t _prot,
unsigned long mapping_sz_limit)
{
unsigned long vaddr, addr, mapping_size = 0;
bool prev_exec, exec = false;
@ -301,7 +304,10 @@ static int __meminit create_physical_mapping(unsigned long start,
int psize;
unsigned long max_mapping_size = memory_block_size;
if (debug_pagealloc_enabled_or_kfence())
if (mapping_sz_limit < max_mapping_size)
max_mapping_size = mapping_sz_limit;
if (debug_pagealloc_enabled())
max_mapping_size = PAGE_SIZE;
start = ALIGN(start, PAGE_SIZE);
@ -356,8 +362,74 @@ static int __meminit create_physical_mapping(unsigned long start,
return 0;
}
#ifdef CONFIG_KFENCE
static bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
static int __init parse_kfence_early_init(char *arg)
{
int val;
if (get_option(&arg, &val))
kfence_early_init = !!val;
return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);
static inline phys_addr_t alloc_kfence_pool(void)
{
phys_addr_t kfence_pool;
/*
* TODO: Support to enable KFENCE after bootup depends on the ability to
* split page table mappings. As such support is not currently
* implemented for radix pagetables, support enabling KFENCE
* only at system startup for now.
*
* After support for splitting mappings is available on radix,
* alloc_kfence_pool() & map_kfence_pool() can be dropped and
* mapping for __kfence_pool memory can be
* split during arch_kfence_init_pool().
*/
if (!kfence_early_init)
goto no_kfence;
kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
if (!kfence_pool)
goto no_kfence;
memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
return kfence_pool;
no_kfence:
disable_kfence();
return 0;
}
static inline void map_kfence_pool(phys_addr_t kfence_pool)
{
if (!kfence_pool)
return;
if (create_physical_mapping(kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
-1, PAGE_KERNEL, PAGE_SIZE))
goto err;
memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
__kfence_pool = __va(kfence_pool);
return;
err:
memblock_phys_free(kfence_pool, KFENCE_POOL_SIZE);
disable_kfence();
}
#else
static inline phys_addr_t alloc_kfence_pool(void) { return 0; }
static inline void map_kfence_pool(phys_addr_t kfence_pool) { }
#endif
static void __init radix_init_pgtable(void)
{
phys_addr_t kfence_pool;
unsigned long rts_field;
phys_addr_t start, end;
u64 i;
@ -365,6 +437,8 @@ static void __init radix_init_pgtable(void)
/* We don't support slb for radix */
slb_set_size(0);
kfence_pool = alloc_kfence_pool();
/*
* Create the linear mapping
*/
@ -381,9 +455,11 @@ static void __init radix_init_pgtable(void)
}
WARN_ON(create_physical_mapping(start, end,
-1, PAGE_KERNEL));
-1, PAGE_KERNEL, ~0UL));
}
map_kfence_pool(kfence_pool);
if (!cpu_has_feature(CPU_FTR_HVMODE) &&
cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
@ -875,7 +951,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
}
return create_physical_mapping(__pa(start), __pa(end),
nid, prot);
nid, prot, ~0UL);
}
int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)


@ -328,94 +328,6 @@ static void preload_age(struct thread_info *ti)
ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}
void slb_setup_new_exec(void)
{
struct thread_info *ti = current_thread_info();
struct mm_struct *mm = current->mm;
unsigned long exec = 0x10000000;
WARN_ON(irqs_disabled());
/*
* preload cache can only be used to determine whether a SLB
* entry exists if it does not start to overflow.
*/
if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
return;
hard_irq_disable();
/*
* We have no good place to clear the slb preload cache on exec,
* flush_thread is about the earliest arch hook but that happens
* after we switch to the mm and have already preloaded the SLBEs.
*
* For the most part that's probably okay to use entries from the
* previous exec, they will age out if unused. It may turn out to
* be an advantage to clear the cache before switching to it,
* however.
*/
/*
* preload some userspace segments into the SLB.
* Almost all 32 and 64bit PowerPC executables are linked at
* 0x10000000 so it makes sense to preload this segment.
*/
if (!is_kernel_addr(exec)) {
if (preload_add(ti, exec))
slb_allocate_user(mm, exec);
}
/* Libraries and mmaps. */
if (!is_kernel_addr(mm->mmap_base)) {
if (preload_add(ti, mm->mmap_base))
slb_allocate_user(mm, mm->mmap_base);
}
/* see switch_slb */
asm volatile("isync" : : : "memory");
local_irq_enable();
}
void preload_new_slb_context(unsigned long start, unsigned long sp)
{
struct thread_info *ti = current_thread_info();
struct mm_struct *mm = current->mm;
unsigned long heap = mm->start_brk;
WARN_ON(irqs_disabled());
/* see above */
if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
return;
hard_irq_disable();
/* Userspace entry address. */
if (!is_kernel_addr(start)) {
if (preload_add(ti, start))
slb_allocate_user(mm, start);
}
/* Top of stack, grows down. */
if (!is_kernel_addr(sp)) {
if (preload_add(ti, sp))
slb_allocate_user(mm, sp);
}
/* Bottom of heap, grows up. */
if (heap && !is_kernel_addr(heap)) {
if (preload_add(ti, heap))
slb_allocate_user(mm, heap);
}
/* see switch_slb */
asm volatile("isync" : : : "memory");
local_irq_enable();
}
static void slb_cache_slbie_kernel(unsigned int index)
{
unsigned long slbie_data = get_paca()->slb_cache[index];


@ -31,6 +31,9 @@ EXPORT_SYMBOL_GPL(kernstart_virt_addr);
bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
#ifdef CONFIG_KFENCE
bool __ro_after_init kfence_disabled;
#endif
static int __init parse_nosmep(char *p)
{


@ -216,6 +216,8 @@ static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
vpn = hpt_vpn(ea, vsid, ssize);
hash = hpt_hash(vpn, shift, ssize);
want_v = hpte_encode_avpn(vpn, psize, ssize);
if (cpu_has_feature(CPU_FTR_ARCH_300))
want_v = hpte_old_to_new_v(want_v);
/* to check in the secondary hash table, we invert the hash */
if (!primary)
@ -229,6 +231,10 @@ static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
/* HPTE matches */
*v = be64_to_cpu(hptep->v);
*r = be64_to_cpu(hptep->r);
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
*v = hpte_new_to_old_v(*v, *r);
*r = hpte_new_to_old_r(*r);
}
return 0;
}
++hpte_group;


@ -532,7 +532,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
balloon_page_insert(b_dev_info, newpage);
balloon_page_delete(page);
__count_vm_event(BALLOON_MIGRATE);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
@ -542,6 +542,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
*/
plpar_page_set_active(page);
balloon_page_finalize(page);
/* balloon page list reference */
put_page(page);
@ -550,7 +551,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
static void cmm_balloon_compaction_init(void)
{
balloon_devinfo_init(&b_dev_info);
b_dev_info.migratepage = cmm_migratepage;
}
#else /* CONFIG_BALLOON_COMPACTION */
@ -572,6 +572,7 @@ static int cmm_init(void)
if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
return -EOPNOTSUPP;
balloon_devinfo_init(&b_dev_info);
cmm_balloon_compaction_init();
rc = register_oom_notifier(&cmm_oom_nb);


@ -396,6 +396,22 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
return (rc <= 0) ? rc : 1;
}
static bool is_load_guest_page_fault(unsigned long scause)
{
/**
* If a g-stage page fault occurs, the direct approach
* is to let the g-stage page fault handler handle it
* naturally, however, calling the g-stage page fault
* handler here seems rather strange.
* Considering this is a corner case, we can directly
* return to the guest and re-execute the same PC, this
* will trigger a g-stage page fault again and then the
* regular g-stage page fault handler will populate
* g-stage page table.
*/
return (scause == EXC_LOAD_GUEST_PAGE_FAULT);
}
/**
* kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
*
@ -421,6 +437,8 @@ int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
ct->sepc,
&utrap);
if (utrap.scause) {
if (is_load_guest_page_fault(utrap.scause))
return 1;
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
@ -476,6 +494,8 @@ int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
if (is_load_guest_page_fault(utrap.scause))
return 1;
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
@ -602,6 +622,8 @@ int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
if (is_load_guest_page_fault(utrap.scause))
return 1;
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);


@ -15,6 +15,7 @@ struct ipl_pl_hdr {
#define IPL_PL_FLAG_IPLPS 0x80
#define IPL_PL_FLAG_SIPL 0x40
#define IPL_PL_FLAG_IPLSR 0x20
#define IPL_PL_FLAG_SBP 0x10
/* IPL Parameter Block header */
struct ipl_pb_hdr {


@ -260,6 +260,24 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_BOOTPROG_RW(_prefix, _name, _fmt_out, _fmt_in, _hdr, _value) \
IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
unsigned long long value; \
if (sscanf(buf, _fmt_in, &value) != 1) \
return -EINVAL; \
(_value) = value; \
(_hdr).flags &= ~IPL_PL_FLAG_SBP; \
return len; \
} \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, 0644, \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
@ -824,12 +842,13 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
reipl_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
reipl_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
reipl_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
reipl_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_fcp->fcp.devno);
DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
reipl_block_fcp->hdr,
reipl_block_fcp->fcp.bootprog);
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
@ -982,10 +1001,11 @@ DEFINE_IPL_ATTR_RW(reipl_nvme, fid, "0x%08llx\n", "%llx\n",
reipl_block_nvme->nvme.fid);
DEFINE_IPL_ATTR_RW(reipl_nvme, nsid, "0x%08llx\n", "%llx\n",
reipl_block_nvme->nvme.nsid);
DEFINE_IPL_ATTR_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
reipl_block_nvme->nvme.bootprog);
DEFINE_IPL_ATTR_RW(reipl_nvme, br_lba, "%lld\n", "%lld\n",
reipl_block_nvme->nvme.br_lba);
DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
reipl_block_nvme->hdr,
reipl_block_nvme->nvme.bootprog);
static struct attribute *reipl_nvme_attrs[] = {
&sys_reipl_nvme_fid_attr.attr,
@ -1112,8 +1132,9 @@ static struct bin_attribute *reipl_eckd_bin_attrs[] = {
};
DEFINE_IPL_CCW_ATTR_RW(reipl_eckd, device, reipl_block_eckd->eckd);
DEFINE_IPL_ATTR_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n",
reipl_block_eckd->eckd.bootprog);
DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n",
reipl_block_eckd->hdr,
reipl_block_eckd->eckd.bootprog);
static struct attribute *reipl_eckd_attrs[] = {
&sys_reipl_eckd_device_attr.attr,
@ -1641,12 +1662,13 @@ DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
dump_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
dump_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
dump_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
dump_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
dump_block_fcp->fcp.devno);
DEFINE_IPL_ATTR_BOOTPROG_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
dump_block_fcp->hdr,
dump_block_fcp->fcp.bootprog);
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
@ -1667,10 +1689,11 @@ DEFINE_IPL_ATTR_RW(dump_nvme, fid, "0x%08llx\n", "%llx\n",
dump_block_nvme->nvme.fid);
DEFINE_IPL_ATTR_RW(dump_nvme, nsid, "0x%08llx\n", "%llx\n",
dump_block_nvme->nvme.nsid);
DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
dump_block_nvme->nvme.bootprog);
DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n",
dump_block_nvme->nvme.br_lba);
DEFINE_IPL_ATTR_BOOTPROG_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
dump_block_nvme->hdr,
dump_block_nvme->nvme.bootprog);
static struct attribute *dump_nvme_attrs[] = {
&sys_dump_nvme_fid_attr.attr,
@ -1687,8 +1710,9 @@ static struct attribute_group dump_nvme_attr_group = {
/* ECKD dump device attributes */
DEFINE_IPL_CCW_ATTR_RW(dump_eckd, device, dump_block_eckd->eckd);
DEFINE_IPL_ATTR_RW(dump_eckd, bootprog, "%lld\n", "%llx\n",
dump_block_eckd->eckd.bootprog);
DEFINE_IPL_ATTR_BOOTPROG_RW(dump_eckd, bootprog, "%lld\n", "%llx\n",
dump_block_eckd->hdr,
dump_block_eckd->eckd.bootprog);
IPL_ATTR_BR_CHR_SHOW_FN(dump, dump_block_eckd->eckd);
IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd);


@ -759,6 +759,7 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
continue;
info->core[info->configured].core_id =
address >> smp_cpu_mt_shift;
info->core[info->configured].type = boot_core_type;
info->configured++;
}
info->combined = info->configured;


@ -2,6 +2,7 @@
#include "misc.h"
#include <asm/bootparam_utils.h>
#include <asm/e820/types.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include "pgtable.h"
#include "../string.h"
@ -175,9 +176,10 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
* For 4- to 5-level paging transition, set up current CR3 as
* the first and the only entry in a new top-level page table.
*/
*trampoline_32bit = __native_read_cr3() | _PAGE_TABLE_NOENC;
*trampoline_32bit = native_read_cr3_pa() | _PAGE_TABLE_NOENC;
} else {
unsigned long src;
u64 *new_cr3;
pgd_t *pgdp;
/*
* For 5- to 4-level paging transition, copy page table pointed
@ -187,8 +189,9 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
* We cannot just point to the page table from trampoline as it
* may be above 4G.
*/
src = *(unsigned long *)__native_read_cr3() & PAGE_MASK;
memcpy(trampoline_32bit, (void *)src, PAGE_SIZE);
pgdp = (pgd_t *)native_read_cr3_pa();
new_cr3 = (u64 *)(native_pgd_val(pgdp[0]) & PTE_PFN_MASK);
memcpy(trampoline_32bit, new_cr3, PAGE_SIZE);
}
toggle_la57(trampoline_32bit);


@ -54,7 +54,7 @@ SYM_FUNC_START(blake2s_compress_ssse3)
movdqa ROT16(%rip),%xmm12
movdqa ROR328(%rip),%xmm13
movdqu 0x20(%rdi),%xmm14
movq %rcx,%xmm15
movd %ecx,%xmm15
leaq SIGMA+0xa0(%rip),%r8
jmp .Lbeginofloop
.align 32
@ -179,7 +179,7 @@ SYM_FUNC_START(blake2s_compress_avx512)
vmovdqu (%rdi),%xmm0
vmovdqu 0x10(%rdi),%xmm1
vmovdqu 0x20(%rdi),%xmm4
vmovq %rcx,%xmm5
vmovd %ecx,%xmm5
vmovdqa IV(%rip),%xmm14
vmovdqa IV+16(%rip),%xmm15
jmp .Lblake2s_compress_avx512_mainloop


@ -20,11 +20,6 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#ifdef CONFIG_XEN_PV
#include <xen/xen-ops.h>
#include <xen/events.h>
#endif
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/traps.h>
@ -346,70 +341,3 @@ SYSCALL_DEFINE0(ni_syscall)
{
return -ENOSYS;
}
#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
* Some hypercalls issued by the toolstack can take many 10s of
* seconds. Allow tasks running hypercalls via the privcmd driver to
* be voluntarily preempted even if full kernel preemption is
* disabled.
*
* Such preemptible hypercalls are bracketed by
* xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
* calls.
*/
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
/*
* In case of scheduling the flag must be cleared and restored after
* returning from schedule as the task might move to a different CPU.
*/
static __always_inline bool get_and_clear_inhcall(void)
{
bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
__this_cpu_write(xen_in_preemptible_hcall, false);
return inhcall;
}
static __always_inline void restore_inhcall(bool inhcall)
{
__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif
static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
inc_irq_stat(irq_hv_callback_count);
xen_evtchn_do_upcall();
set_irq_regs(old_regs);
}
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
instrumentation_end();
irqentry_exit(regs, state);
}
}
#endif /* CONFIG_XEN_PV */


@ -745,7 +745,12 @@ static void amd_pmu_enable_all(int added)
if (!test_bit(idx, cpuc->active_mask))
continue;
amd_pmu_enable_event(cpuc->events[idx]);
/*
* FIXME: cpuc->events[idx] can become NULL in a subtle race
* condition with NMI->throttle->x86_pmu_stop().
*/
if (cpuc->events[idx])
amd_pmu_enable_event(cpuc->events[idx]);
}
}


@ -3808,7 +3808,9 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
if (!event->attr.exclude_kernel)
flags &= ~PERF_SAMPLE_REGS_USER;
if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
flags &= ~PERF_SAMPLE_REGS_USER;
if (event->attr.sample_regs_intr & ~PEBS_GP_REGS)
flags &= ~PERF_SAMPLE_REGS_INTR;
return flags;
}


@ -1950,6 +1950,11 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
* the gfn, i.e. retrying the instruction will hit a
* !PRESENT fault, which results in a new shadow page
* and sends KVM back to square one.
*
* EMULTYPE_SKIP_SOFT_INT - Set in combination with EMULTYPE_SKIP to only skip
* an instruction if it could generate a given software
* interrupt, which must be encoded via
* EMULTYPE_SET_SOFT_INT_VECTOR().
*/
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
@ -1960,6 +1965,10 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
#define EMULTYPE_PF (1 << 6)
#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
#define EMULTYPE_WRITE_PF_TO_SP (1 << 8)
#define EMULTYPE_SKIP_SOFT_INT (1 << 9)
#define EMULTYPE_SET_SOFT_INT_VECTOR(v) ((u32)((v) & 0xff) << 16)
#define EMULTYPE_GET_SOFT_INT_VECTOR(e) (((e) >> 16) & 0xff)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,


@ -187,12 +187,12 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct pt_regs *regs, int error_code, int si_code);
static inline unsigned long regs_return_value(struct pt_regs *regs)
static __always_inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
}
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
static __always_inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->ax = rc;
}
@ -277,34 +277,34 @@ static __always_inline bool ip_within_syscall_gap(struct pt_regs *regs)
}
#endif
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
static inline unsigned long instruction_pointer(struct pt_regs *regs)
static __always_inline unsigned long instruction_pointer(struct pt_regs *regs)
{
return regs->ip;
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
static __always_inline
void instruction_pointer_set(struct pt_regs *regs, unsigned long val)
{
regs->ip = val;
}
static inline unsigned long frame_pointer(struct pt_regs *regs)
static __always_inline unsigned long frame_pointer(struct pt_regs *regs)
{
return regs->bp;
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
static __always_inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
static __always_inline
void user_stack_pointer_set(struct pt_regs *regs, unsigned long val)
{
regs->sp = val;
}


@ -237,7 +237,7 @@ static bool cpu_has_entrysign(void)
if (fam == 0x1a) {
if (model <= 0x2f ||
(0x40 <= model && model <= 0x4f) ||
(0x60 <= model && model <= 0x6f))
(0x60 <= model && model <= 0x7f))
return true;
}


@ -183,8 +183,8 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
* in false positive reports. Disable instrumentation to avoid those.
*/
__no_kmsan_checks
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, const char *log_lvl)
static void __show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, const char *log_lvl)
{
struct unwind_state state;
struct stack_info stack_info = {0};
@ -305,6 +305,25 @@ next:
}
}
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, const char *log_lvl)
{
/*
* Disable KASAN to avoid false positives during walking another
* task's stacks, as values on these stacks may change concurrently
* with task execution.
*/
bool disable_kasan = task && task != current;
if (disable_kasan)
kasan_disable_current();
__show_trace_log_lvl(task, regs, stack, log_lvl);
if (disable_kasan)
kasan_enable_current();
}
void show_stack(struct task_struct *task, unsigned long *sp,
const char *loglvl)
{


@ -2058,15 +2058,33 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
ktime_t delta;
/*
* Synchronize both deadlines to the same time source or
* differences in the periods (caused by differences in the
* underlying clocks or numerical approximation errors) will
* cause the two to drift apart over time as the errors
* accumulate.
* Use kernel time as the time source for both the hrtimer deadline and
* TSC-based deadline so that they stay synchronized. Computing each
* deadline independently will cause the two deadlines to drift apart
* over time as differences in the periods accumulate, e.g. due to
* differences in the underlying clocks or numerical approximation errors.
*/
apic->lapic_timer.target_expiration =
ktime_add_ns(apic->lapic_timer.target_expiration,
apic->lapic_timer.period);
/*
* If the new expiration is in the past, e.g. because userspace stopped
* running the VM for an extended duration, then force the expiration
* to "now" and don't try to play catch-up with the missed events. KVM
* will only deliver a single interrupt regardless of how many events
* are pending, i.e. restarting the timer with an expiration in the
* past will do nothing more than waste host cycles, and can even lead
* to a hard lockup in extreme cases.
*/
if (ktime_before(apic->lapic_timer.target_expiration, now))
apic->lapic_timer.target_expiration = now;
/*
* Note, ensuring the expiration isn't in the past also prevents delta
* from going negative, which could cause the TSC deadline to become
* excessively large due to it an unsigned value.
*/
delta = ktime_sub(apic->lapic_timer.target_expiration, now);
apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
nsec_to_cycles(apic->vcpu, delta);
@ -2855,9 +2873,9 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
apic_timer_expired(apic, true);
if (lapic_is_periodic(apic)) {
if (lapic_is_periodic(apic) && !WARN_ON_ONCE(!apic->lapic_timer.period)) {
advance_periodic_target_expiration(apic);
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
hrtimer_set_expires(&ktimer->timer, ktimer->target_expiration);
return HRTIMER_RESTART;
} else
return HRTIMER_NORESTART;


@ -544,6 +544,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
struct kvm_vcpu *vcpu = &svm->vcpu;
nested_vmcb02_compute_g_pat(svm);
vmcb_mark_dirty(vmcb02, VMCB_NPT);
/* Load the nested guest state */
if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
@ -600,11 +601,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
*/
svm_copy_lbrs(vmcb02, vmcb12);
vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
svm_update_lbrv(&svm->vcpu);
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
} else {
svm_copy_lbrs(vmcb02, vmcb01);
}
svm_update_lbrv(&svm->vcpu);
}
static inline bool is_evtinj_soft(u32 evtinj)
@ -675,6 +675,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
vmcb_mark_dirty(vmcb02, VMCB_PERM_MAP);
/* Done at vmrun: asid. */
@ -729,11 +730,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
svm->soft_int_next_rip = vmcb12_rip;
}
vmcb02->control.virt_ext = vmcb01->control.virt_ext &
LBR_CTL_ENABLE_MASK;
if (guest_can_use(vcpu, X86_FEATURE_LBRV))
vmcb02->control.virt_ext |=
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
/* LBR_CTL_ENABLE_MASK is controlled by svm_update_lbrv() */
if (!nested_vmcb_needs_vls_intercept(svm))
vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
@ -884,7 +881,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
if (!nested_vmcb_check_save(vcpu) ||
!nested_vmcb_check_controls(vcpu)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_code_hi = -1u;
vmcb12->control.exit_info_1 = 0;
vmcb12->control.exit_info_2 = 0;
goto out;
@ -917,7 +914,7 @@ out_exit_err:
svm->soft_int_injected = false;
svm->vmcb->control.exit_code = SVM_EXIT_ERR;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_code_hi = -1u;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
@ -1064,13 +1061,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)))
svm_copy_lbrs(vmcb12, vmcb02);
svm_update_lbrv(vcpu);
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
else
svm_copy_lbrs(vmcb01, vmcb02);
svm_update_lbrv(vcpu);
}
svm_update_lbrv(vcpu);
if (vnmi) {
if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)


@ -370,6 +370,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len);
static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
int emul_type,
bool commit_side_effects)
{
struct vcpu_svm *svm = to_svm(vcpu);
@ -399,7 +400,7 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
if (unlikely(!commit_side_effects))
old_rflags = svm->vmcb->save.rflags;
if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
if (!kvm_emulate_instruction(vcpu, emul_type))
return 0;
if (unlikely(!commit_side_effects))
@ -417,11 +418,13 @@ done:
static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
return __svm_skip_emulated_instruction(vcpu, true);
return __svm_skip_emulated_instruction(vcpu, EMULTYPE_SKIP, true);
}
static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu, u8 vector)
{
const int emul_type = EMULTYPE_SKIP | EMULTYPE_SKIP_SOFT_INT |
EMULTYPE_SET_SOFT_INT_VECTOR(vector);
unsigned long rip, old_rip = kvm_rip_read(vcpu);
struct vcpu_svm *svm = to_svm(vcpu);
@ -437,7 +440,7 @@ static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
* in use, the skip must not commit any side effects such as clearing
* the interrupt shadow or RFLAGS.RF.
*/
if (!__svm_skip_emulated_instruction(vcpu, !nrips))
if (!__svm_skip_emulated_instruction(vcpu, emul_type, !nrips))
return -EIO;
rip = kvm_rip_read(vcpu);
@ -473,7 +476,7 @@ static void svm_inject_exception(struct kvm_vcpu *vcpu)
kvm_deliver_exception_payload(vcpu, ex);
if (kvm_exception_is_soft(ex->vector) &&
svm_update_soft_interrupt_rip(vcpu))
svm_update_soft_interrupt_rip(vcpu, ex->vector))
return;
svm->vmcb->control.event_inj = ex->vector
@ -1009,70 +1012,67 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
if (intercept == svm->lbr_msrs_intercepted)
return;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP,
!intercept, !intercept);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP,
!intercept, !intercept);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP,
!intercept, !intercept);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP,
!intercept, !intercept);
if (sev_es_guest(vcpu->kvm))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR,
!intercept, !intercept);
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
if (is_guest_mode(vcpu))
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
svm->lbr_msrs_intercepted = intercept;
}
static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
}
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
__svm_enable_lbrv(vcpu);
svm_recalc_lbr_msr_intercepts(vcpu);
}
static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
/*
* Move the LBR msrs back to the vmcb01 to avoid copying them
* on nested guest entries.
*/
if (is_guest_mode(vcpu))
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
}
static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
{
/*
* If LBR virtualization is disabled, the LBR MSRs are always kept in
* vmcb01. If LBR virtualization is enabled and L1 is running VMs of
* its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
*/
return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
svm->vmcb01.ptr;
to_svm(vcpu)->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
}
void svm_update_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
(is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
if (enable_lbrv == current_enable_lbrv)
return;
if (enable_lbrv && !current_enable_lbrv)
__svm_enable_lbrv(vcpu);
else if (!enable_lbrv && current_enable_lbrv)
__svm_disable_lbrv(vcpu);
if (enable_lbrv)
svm_enable_lbrv(vcpu);
else
svm_disable_lbrv(vcpu);
/*
* During nested transitions, it is possible that the current VMCB has
* LBR_CTL set, but the previous LBR_CTL had it cleared (or vice versa).
* In this case, even though LBR_CTL does not need an update, intercepts
* do, so always recalculate the intercepts here.
*/
svm_recalc_lbr_msr_intercepts(vcpu);
}
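
A minimal, standalone C sketch of the caching pattern used by svm_recalc_lbr_msr_intercepts() above: the desired intercept state is derived once from the enable bit, and the per-MSR update is skipped whenever it matches the cached value. The struct, flag array and names below are illustrative stand-ins, not KVM's MSR-bitmap API.

#include <stdbool.h>
#include <stdio.h>

enum { MSR_A, MSR_B, NR_MSRS };

struct vcpu_state {
    bool lbr_enabled;            /* stands in for LBR_CTL_ENABLE_MASK */
    bool lbr_msrs_intercepted;   /* cached state of the "bitmap" below */
    bool intercept[NR_MSRS];     /* stands in for the per-MSR intercept bits */
};

static void recalc_lbr_msr_intercepts(struct vcpu_state *v)
{
    bool intercept = !v->lbr_enabled;

    if (intercept == v->lbr_msrs_intercepted)
        return;                          /* nothing changed, skip the update */

    for (int i = 0; i < NR_MSRS; i++)
        v->intercept[i] = intercept;

    v->lbr_msrs_intercepted = intercept;
}

int main(void)
{
    struct vcpu_state v = { .lbr_enabled = false, .lbr_msrs_intercepted = true };

    v.lbr_enabled = true;
    recalc_lbr_msr_intercepts(&v);       /* real update: intercepts now cleared */
    recalc_lbr_msr_intercepts(&v);       /* no-op: cached state already matches */
    printf("%d\n", v.intercept[MSR_A]);  /* prints 0 */
    return 0;
}
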
void disable_nmi_singlestep(struct vcpu_svm *svm)
@ -1478,6 +1478,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
}
svm->x2avic_msrs_intercepted = true;
svm->lbr_msrs_intercepted = true;
svm->vmcb01.ptr = page_address(vmcb01_page);
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
@ -2642,6 +2643,7 @@ static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
if (cr0 ^ val) {
svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
svm->vmcb->control.exit_code_hi = 0;
ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
}
@ -2902,19 +2904,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = svm->tsc_aux;
break;
case MSR_IA32_DEBUGCTLMSR:
msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
msr_info->data = svm->vmcb->save.dbgctl;
break;
case MSR_IA32_LASTBRANCHFROMIP:
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
msr_info->data = svm->vmcb->save.br_from;
break;
case MSR_IA32_LASTBRANCHTOIP:
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
msr_info->data = svm->vmcb->save.br_to;
break;
case MSR_IA32_LASTINTFROMIP:
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
msr_info->data = svm->vmcb->save.last_excp_from;
break;
case MSR_IA32_LASTINTTOIP:
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
msr_info->data = svm->vmcb->save.last_excp_to;
break;
case MSR_VM_HSAVE_PA:
msr_info->data = svm->nested.hsave_msr;
@ -3183,10 +3185,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
if (data & DEBUGCTL_RESERVED_BITS)
return 1;
if (svm_get_lbr_vmcb(svm)->save.dbgctl == data)
if (svm->vmcb->save.dbgctl == data)
break;
svm_get_lbr_vmcb(svm)->save.dbgctl = data;
svm->vmcb->save.dbgctl = data;
vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
svm_update_lbrv(vcpu);
break;
@ -3668,11 +3670,12 @@ static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
{
struct kvm_queued_interrupt *intr = &vcpu->arch.interrupt;
struct vcpu_svm *svm = to_svm(vcpu);
u32 type;
if (vcpu->arch.interrupt.soft) {
if (svm_update_soft_interrupt_rip(vcpu))
if (intr->soft) {
if (svm_update_soft_interrupt_rip(vcpu, intr->nr))
return;
type = SVM_EVTINJ_TYPE_SOFT;
@ -3680,12 +3683,10 @@ static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
type = SVM_EVTINJ_TYPE_INTR;
}
trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
vcpu->arch.interrupt.soft, reinjected);
trace_kvm_inj_virq(intr->nr, intr->soft, reinjected);
++vcpu->stat.irq_injections;
svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
SVM_EVTINJ_VALID | type;
svm->vmcb->control.event_inj = intr->nr | SVM_EVTINJ_VALID | type;
}
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
@ -4546,31 +4547,45 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
case SVM_EXIT_WRITE_CR0: {
unsigned long cr0, val;
if (info->intercept == x86_intercept_cr_write)
/*
* Adjust the exit code accordingly if a CR other than CR0 is
* being written, and skip straight to the common handling as
* only CR0 has an additional selective intercept.
*/
if (info->intercept == x86_intercept_cr_write && info->modrm_reg) {
icpt_info.exit_code += info->modrm_reg;
if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
info->intercept == x86_intercept_clts)
break;
if (!(vmcb12_is_intercept(&svm->nested.ctl,
INTERCEPT_SELECTIVE_CR0)))
break;
cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
if (info->intercept == x86_intercept_lmsw) {
cr0 &= 0xfUL;
val &= 0xfUL;
/* lmsw can't clear PE - catch this here */
if (cr0 & X86_CR0_PE)
val |= X86_CR0_PE;
}
/*
* Convert the exit_code to SVM_EXIT_CR0_SEL_WRITE if a
* selective CR0 intercept is triggered (the common logic will
* treat the selective intercept as being enabled). Note, the
* unconditional intercept has higher priority, i.e. this is
* only relevant if *only* the selective intercept is enabled.
*/
if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_CR0_WRITE) ||
!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))
break;
/* CLTS never triggers INTERCEPT_SELECTIVE_CR0 */
if (info->intercept == x86_intercept_clts)
break;
/* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */
if (info->intercept == x86_intercept_lmsw) {
icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
break;
}
/*
* MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit
* other than SVM_CR0_SELECTIVE_MASK is changed.
*/
cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
if (cr0 ^ val)
icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
break;
}
case SVM_EXIT_READ_DR0:
@ -4631,6 +4646,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
if (static_cpu_has(X86_FEATURE_NRIPS))
vmcb->control.next_rip = info->next_rip;
vmcb->control.exit_code = icpt_info.exit_code;
vmcb->control.exit_code_hi = 0;
vmexit = nested_svm_exit_handled(svm);
ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
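
A hypothetical userspace sketch of the MOV-to-CR0 decision made by the selective-CR0 handling above: the selective exit code is used only when a bit outside the TS/MP "selective" mask actually changes. The mask and exit-code constants are illustrative assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define CR0_SELECTIVE_MASK  ((1u << 3) | (1u << 1))   /* CR0.TS | CR0.MP */
#define EXIT_WRITE_CR0      0x010u                    /* illustrative exit codes */
#define EXIT_CR0_SEL_WRITE  0x065u

static uint32_t cr0_write_exit_code(uint32_t old_cr0, uint32_t new_cr0)
{
    uint32_t old_rest = old_cr0 & ~CR0_SELECTIVE_MASK;
    uint32_t new_rest = new_cr0 & ~CR0_SELECTIVE_MASK;

    /* Only a change outside TS/MP promotes the exit to the selective code. */
    return (old_rest ^ new_rest) ? EXIT_CR0_SEL_WRITE : EXIT_WRITE_CR0;
}

int main(void)
{
    printf("%#x\n", cr0_write_exit_code(0x80000011u, 0x80000019u)); /* only TS toggled -> 0x10 */
    printf("%#x\n", cr0_write_exit_code(0x80000011u, 0x00000011u)); /* PG cleared      -> 0x65 */
    return 0;
}
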

View File

@ -288,6 +288,7 @@ struct vcpu_svm {
bool guest_state_loaded;
bool x2avic_msrs_intercepted;
bool lbr_msrs_intercepted;
/* Guest GIF value, used when vGIF is not enabled */
bool guest_gif;
@ -601,9 +602,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
svm->vmcb->control.exit_code = exit_code;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
svm->vmcb->control.exit_code = exit_code;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
return nested_svm_vmexit(svm);
}

View File

@ -4908,7 +4908,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (vmx->nested.update_vmcs01_apicv_status) {
vmx->nested.update_vmcs01_apicv_status = false;
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
vmx_refresh_apicv_exec_ctrl(vcpu);
}
if (vmx->nested.update_vmcs01_hwapic_isr) {

View File

@ -4451,7 +4451,7 @@ static u32 vmx_vmexit_ctrl(void)
~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);

View File

@ -395,6 +395,7 @@ void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);

View File

@ -855,6 +855,13 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
}
EXPORT_SYMBOL_GPL(kvm_require_dr);
static bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
return (vcpu->arch.apf.msr_en_val & mask) == mask;
}
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
@ -947,15 +954,20 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon
}
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
/*
* Clearing CR0.PG is defined to flush the TLB from the guest's
* perspective.
*/
if (!(cr0 & X86_CR0_PG))
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
/*
* Check for async #PF completion events when enabling paging,
* as the vCPU may have previously encountered async #PFs (it's
* entirely legal for the guest to toggle paging on/off without
* waiting for the async #PF queue to drain).
*/
else if (kvm_pv_async_pf_enabled(vcpu))
kvm_make_request(KVM_REQ_APF_READY, vcpu);
}
if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
@ -3398,13 +3410,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
}
static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
return (vcpu->arch.apf.msr_en_val & mask) == mask;
}
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
@ -8864,6 +8869,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
return false;
}
static bool is_soft_int_instruction(struct x86_emulate_ctxt *ctxt,
int emulation_type)
{
u8 vector = EMULTYPE_GET_SOFT_INT_VECTOR(emulation_type);
switch (ctxt->b) {
case 0xcc:
return vector == BP_VECTOR;
case 0xcd:
return vector == ctxt->src.val;
case 0xce:
return vector == OF_VECTOR;
default:
return false;
}
}
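
A minimal userspace sketch of the opcode check performed by the new is_soft_int_instruction() helper above: INT3 (0xCC) must match vector 3, INT imm8 (0xCD) must match its immediate, and INTO (0xCE) must match vector 4. The helper name and scaffolding below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BP_VECTOR 3   /* #BP */
#define OF_VECTOR 4   /* #OF */

static bool soft_int_matches(uint8_t opcode, uint8_t imm, uint8_t vector)
{
    switch (opcode) {
    case 0xcc: return vector == BP_VECTOR;   /* INT3 */
    case 0xcd: return vector == imm;         /* INT imm8 */
    case 0xce: return vector == OF_VECTOR;   /* INTO */
    default:   return false;
    }
}

int main(void)
{
    printf("%d\n", soft_int_matches(0xcd, 0x80, 0x80)); /* INT 0x80, vector 0x80 -> 1 */
    printf("%d\n", soft_int_matches(0xcc, 0x00, 0x04)); /* INT3 but vector 4     -> 0 */
    return 0;
}
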
/*
* Decode an instruction for emulation. The caller is responsible for handling
* code breakpoints. Note, manually detecting code breakpoints is unnecessary
@ -8954,6 +8976,10 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* injecting single-step #DBs.
*/
if (emulation_type & EMULTYPE_SKIP) {
if (emulation_type & EMULTYPE_SKIP_SOFT_INT &&
!is_soft_int_instruction(ctxt, emulation_type))
return 0;
if (ctxt->mode != X86EMUL_MODE_PROT64)
ctxt->eip = (u32)ctxt->_eip;
else

View File

@ -72,6 +72,7 @@
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
#include <asm/irq_stack.h>
#ifdef CONFIG_X86_IOPL_IOPERM
#include <asm/io_bitmap.h>
#endif
@ -97,6 +98,44 @@ void *xen_initial_gdt;
static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);
#ifndef CONFIG_PREEMPTION
/*
* Some hypercalls issued by the toolstack can take many 10s of
* seconds. Allow tasks running hypercalls via the privcmd driver to
* be voluntarily preempted even if full kernel preemption is
* disabled.
*
* Such preemptible hypercalls are bracketed by
* xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
* calls.
*/
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_PER_CPU_SYMBOL_GPL(xen_in_preemptible_hcall);
/*
* In case of scheduling the flag must be cleared and restored after
* returning from schedule as the task might move to a different CPU.
*/
static __always_inline bool get_and_clear_inhcall(void)
{
bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
__this_cpu_write(xen_in_preemptible_hcall, false);
return inhcall;
}
static __always_inline void restore_inhcall(bool inhcall)
{
__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif
struct tls_descs {
struct desc_struct desc[3];
};
@ -678,6 +717,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
}
#endif
static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
inc_irq_stat(irq_hv_callback_count);
xen_evtchn_do_upcall();
set_irq_regs(old_regs);
}
__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
instrumentation_end();
irqentry_exit(regs, state);
}
}
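
A userspace sketch (thread-local storage standing in for the per-CPU variable) of the save/clear/restore dance performed around the conditional reschedule above: the flag must be cleared before scheduling and restored afterwards because the task may resume on a different CPU. Names mirror the helpers above; everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool in_preemptible_hcall;

static bool get_and_clear_inhcall(void)
{
    bool inhcall = in_preemptible_hcall;

    in_preemptible_hcall = false;
    return inhcall;
}

static void restore_inhcall(bool inhcall)
{
    in_preemptible_hcall = inhcall;
}

int main(void)
{
    in_preemptible_hcall = true;

    bool inhcall = get_and_clear_inhcall();
    /* ...a voluntary reschedule could happen here with the flag cleared... */
    restore_inhcall(inhcall);

    printf("%d\n", in_preemptible_hcall);  /* prints 1: the flag survived */
    return 0;
}
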
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);

View File

@ -24,11 +24,13 @@
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>
#include <linux/sched/isolation.h>
#include <trace/events/block.h>
@ -279,12 +281,12 @@ void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
struct request_queue *q;
mutex_lock(&set->tag_list_lock);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
rcu_read_lock();
list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
if (!blk_queue_skip_tagset_quiesce(q))
blk_mq_quiesce_queue_nowait(q);
}
mutex_unlock(&set->tag_list_lock);
rcu_read_unlock();
blk_mq_wait_quiesce_done(set);
}
@ -294,12 +296,12 @@ void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
struct request_queue *q;
mutex_lock(&set->tag_list_lock);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
rcu_read_lock();
list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
if (!blk_queue_skip_tagset_quiesce(q))
blk_mq_unquiesce_queue(q);
}
mutex_unlock(&set->tag_list_lock);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
@ -2192,6 +2194,15 @@ static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
return cpu;
}
/*
* ->next_cpu is always calculated from hctx->cpumask, so simply use
* it for speeding up the check
*/
static bool blk_mq_hctx_empty_cpumask(struct blk_mq_hw_ctx *hctx)
{
return hctx->next_cpu >= nr_cpu_ids;
}
/*
* It'd be great if the workqueue API had a way to pass
* in a mask and had some smarts for more clever placement.
@ -2203,7 +2214,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
bool tried = false;
int next_cpu = hctx->next_cpu;
if (hctx->queue->nr_hw_queues == 1)
/* Switch to unbound if no allowable CPUs in this hctx */
if (hctx->queue->nr_hw_queues == 1 || blk_mq_hctx_empty_cpumask(hctx))
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
@ -3534,23 +3546,39 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
return data.has_rq;
}
static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
struct blk_mq_hw_ctx *hctx)
static bool blk_mq_hctx_has_online_cpu(struct blk_mq_hw_ctx *hctx,
unsigned int this_cpu)
{
if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
return false;
if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
return false;
return true;
enum hctx_type type = hctx->type;
int cpu;
/*
* hctx->cpumask has to rule out isolated CPUs, but userspace still
* might submit IOs on these isolated CPUs, so use the queue map to
* check if all CPUs mapped to this hctx are offline
*/
for_each_online_cpu(cpu) {
struct blk_mq_hw_ctx *h = blk_mq_map_queue_type(hctx->queue,
type, cpu);
if (h != hctx)
continue;
/* this hctx has at least one online CPU */
if (this_cpu != cpu)
return true;
}
return false;
}
static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
struct blk_mq_hw_ctx, cpuhp_online);
int ret = 0;
if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
!blk_mq_last_cpu_in_hctx(cpu, hctx))
if (!hctx->nr_ctx || blk_mq_hctx_has_online_cpu(hctx, cpu))
return 0;
/*
@ -3569,12 +3597,40 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
* frozen and there are no requests.
*/
if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
while (blk_mq_hctx_has_requests(hctx))
while (blk_mq_hctx_has_requests(hctx)) {
/*
* The wakeup capable IRQ handler of block device is
* not called during suspend. Skip the loop by checking
* pm_wakeup_pending to prevent the deadlock and improve
* suspend latency.
*/
if (pm_wakeup_pending()) {
clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
ret = -EBUSY;
break;
}
msleep(5);
}
percpu_ref_put(&hctx->queue->q_usage_counter);
}
return 0;
return ret;
}
/*
* Check if one CPU is mapped to the specified hctx
*
* Isolated CPUs have been ruled out from hctx->cpumask, which is supposed
* to be used for scheduling kworker only. For other usage, please call this
* helper for checking if one CPU belongs to the specified hctx
*/
static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
const struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue,
hctx->type, cpu);
return mapped_hctx == hctx;
}
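
A toy, standalone model of the CPU-to-hctx queue map consulted above: once isolated CPUs are cleared from hctx->cpumask, the mask can no longer answer "does this CPU map to this hctx?", so the hotplug paths go back to the map. The mapping table, sizes and names are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* cpu -> hctx index; a userspace stand-in for the tag set's queue map */
static const int mq_map[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

static bool cpu_mapped_to_hctx(int cpu, int hctx_idx)
{
    return mq_map[cpu] == hctx_idx;
}

static bool hctx_has_other_online_cpu(int hctx_idx, int this_cpu,
                                      const bool online[NR_CPUS])
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if (online[cpu] && cpu != this_cpu &&
            cpu_mapped_to_hctx(cpu, hctx_idx))
            return true;
    return false;
}

int main(void)
{
    bool online[NR_CPUS] = { true, true, true, false, true, true, true, true };

    /* CPU 2 going offline: CPU 3 is already down, so hctx 1 has no other online CPU. */
    printf("%d\n", hctx_has_other_online_cpu(1, 2, online));  /* prints 0 */
    return 0;
}
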
static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
@ -3582,7 +3638,7 @@ static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
struct blk_mq_hw_ctx, cpuhp_online);
if (cpumask_test_cpu(cpu, hctx->cpumask))
if (blk_mq_cpu_mapped_to_hctx(cpu, hctx))
clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
return 0;
}
@ -3600,7 +3656,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
enum hctx_type type;
hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
if (!cpumask_test_cpu(cpu, hctx->cpumask))
if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx))
return 0;
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
@ -4031,6 +4087,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
queue_for_each_hw_ctx(q, hctx, i) {
int cpu;
/*
* If no software queues are mapped to this hardware queue,
* disable it and free the request entries.
@ -4057,6 +4115,15 @@ static void blk_mq_map_swqueue(struct request_queue *q)
*/
sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
/*
* Rule out isolated CPUs from hctx->cpumask to avoid
* running block kworker on isolated CPUs
*/
for_each_cpu(cpu, hctx->cpumask) {
if (cpu_is_isolated(cpu))
cpumask_clear_cpu(cpu, hctx->cpumask);
}
/*
* Initialize batch roundrobin counts
*/
@ -4103,7 +4170,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
struct blk_mq_tag_set *set = q->tag_set;
mutex_lock(&set->tag_list_lock);
list_del(&q->tag_set_list);
list_del_rcu(&q->tag_set_list);
if (list_is_singular(&set->tag_list)) {
/* just transitioned to unshared */
set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
@ -4111,7 +4178,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
blk_mq_update_tag_set_shared(set, false);
}
mutex_unlock(&set->tag_list_lock);
INIT_LIST_HEAD(&q->tag_set_list);
}
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@ -4130,7 +4196,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
}
if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
queue_set_hctx_shared(q, true);
list_add_tail(&q->tag_set_list, &set->tag_list);
list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
mutex_unlock(&set->tag_list_lock);
}
@ -4387,6 +4453,12 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/* mark the queue as mq asap */
q->mq_ops = set->ops;
/*
* ->tag_set has to be setup before initialize hctx, which cpuphp
* handler needs it for checking queue mapping
*/
q->tag_set = set;
if (blk_mq_alloc_ctxs(q))
goto err_exit;
@ -4405,8 +4477,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
q->tag_set = set;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
blk_mq_update_poll_flag(q);

View File

@ -83,7 +83,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
(disk->flags & GENHD_FL_HIDDEN))
return false;
pr_info("%s: detected capacity change from %lld to %lld\n",
pr_info_ratelimited("%s: detected capacity change from %lld to %lld\n",
disk->disk_name, capacity, size);
/*

View File

@ -1211,15 +1211,14 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);
memset(areq, 0, areqlen);
ctx->inflight = true;
areq->areqlen = areqlen;
areq->sk = sk;
areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
areq->tsgl = NULL;
areq->tsgl_entries = 0;
return areq;
}

View File

@ -416,9 +416,8 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
ctx->result = NULL;
memset(ctx, 0, len);
ctx->len = len;
ctx->more = false;
crypto_init_wait(&ctx->wait);
ask->private = ctx;

View File

@ -248,9 +248,8 @@ static int rng_accept_parent(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
memset(ctx, 0, len);
ctx->len = len;
ctx->addtl = NULL;
ctx->addtl_len = 0;
/*
* No seeding done at that point -- if multiple accepts are

View File

@ -11,6 +11,7 @@
#include <crypto/public_key.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <keys/system_keyring.h>
@ -151,12 +152,17 @@ struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
size_t len_2)
{
struct asymmetric_key_id *kid;
size_t kid_sz;
size_t len;
kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2,
GFP_KERNEL);
if (check_add_overflow(len_1, len_2, &len))
return ERR_PTR(-EOVERFLOW);
if (check_add_overflow(sizeof(struct asymmetric_key_id), len, &kid_sz))
return ERR_PTR(-EOVERFLOW);
kid = kmalloc(kid_sz, GFP_KERNEL);
if (!kid)
return ERR_PTR(-ENOMEM);
kid->len = len_1 + len_2;
kid->len = len;
memcpy(kid->data, val_1, len_1);
memcpy(kid->data + len_1, val_2, len_2);
return kid;
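
A userspace sketch of the overflow-checked size computation adopted above; check_add_overflow() in the kernel wraps the same compiler primitive used here, and the structure layout is a made-up stand-in for the key ID.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct key_id {
    size_t len;
    unsigned char data[];
};

static struct key_id *make_key_id(const void *a, size_t len_a,
                                  const void *b, size_t len_b)
{
    size_t len, total;
    struct key_id *kid;

    if (__builtin_add_overflow(len_a, len_b, &len))
        return NULL;                       /* len_1 + len_2 overflowed */
    if (__builtin_add_overflow(sizeof(*kid), len, &total))
        return NULL;                       /* header + payload overflowed */

    kid = malloc(total);
    if (!kid)
        return NULL;
    kid->len = len;
    memcpy(kid->data, a, len_a);
    memcpy(kid->data + len_a, b, len_b);
    return kid;
}

int main(void)
{
    struct key_id *kid = make_key_id("abc", 3, "de", 2);

    printf("%zu\n", kid ? kid->len : 0);   /* prints 5 */
    free(kid);
    return 0;
}
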

View File

@ -39,7 +39,7 @@ struct authenc_request_ctx {
static void authenc_request_complete(struct aead_request *req, int err)
{
if (err != -EINPROGRESS)
if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
}
@ -109,27 +109,42 @@ out:
return err;
}
static void authenc_geniv_ahash_done(void *data, int err)
static void authenc_geniv_ahash_finish(struct aead_request *req)
{
struct aead_request *req = data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(authenc), 1);
}
out:
static void authenc_geniv_ahash_done(void *data, int err)
{
struct aead_request *req = data;
if (!err)
authenc_geniv_ahash_finish(req);
aead_request_complete(req, err);
}
static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
/*
* Used when the ahash request was invoked in the async callback context
* of the previous skcipher request. Eat any EINPROGRESS notifications.
*/
static void authenc_geniv_ahash_done2(void *data, int err)
{
struct aead_request *req = data;
if (!err)
authenc_geniv_ahash_finish(req);
authenc_request_complete(req, err);
}
static int crypto_authenc_genicv(struct aead_request *req, unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@ -138,6 +153,7 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
struct crypto_ahash *auth = ctx->auth;
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
unsigned int flags = aead_request_flags(req) & ~mask;
u8 *hash = areq_ctx->tail;
int err;
@ -148,7 +164,8 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
ahash_request_set_crypt(ahreq, req->dst, hash,
req->assoclen + req->cryptlen);
ahash_request_set_callback(ahreq, flags,
authenc_geniv_ahash_done, req);
mask ? authenc_geniv_ahash_done2 :
authenc_geniv_ahash_done, req);
err = crypto_ahash_digest(ahreq);
if (err)
@ -164,12 +181,11 @@ static void crypto_authenc_encrypt_done(void *data, int err)
{
struct aead_request *areq = data;
if (err)
goto out;
err = crypto_authenc_genicv(areq, 0);
out:
if (err) {
aead_request_complete(areq, err);
return;
}
err = crypto_authenc_genicv(areq, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(areq, err);
}
@ -222,11 +238,18 @@ static int crypto_authenc_encrypt(struct aead_request *req)
if (err)
return err;
return crypto_authenc_genicv(req, aead_request_flags(req));
return crypto_authenc_genicv(req, 0);
}
static void authenc_decrypt_tail_done(void *data, int err)
{
struct aead_request *req = data;
authenc_request_complete(req, err);
}
static int crypto_authenc_decrypt_tail(struct aead_request *req,
unsigned int flags)
unsigned int mask)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
@ -237,6 +260,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ictx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc);
unsigned int flags = aead_request_flags(req) & ~mask;
u8 *ihash = ahreq->result + authsize;
struct scatterlist *src, *dst;
@ -253,7 +277,9 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
req->base.complete, req->base.data);
mask ? authenc_decrypt_tail_done :
req->base.complete,
mask ? req : req->base.data);
skcipher_request_set_crypt(skreq, src, dst,
req->cryptlen - authsize, req->iv);
@ -264,12 +290,11 @@ static void authenc_verify_ahash_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = crypto_authenc_decrypt_tail(req, 0);
out:
if (err) {
aead_request_complete(req, err);
return;
}
err = crypto_authenc_decrypt_tail(req, CRYPTO_TFM_REQ_MAY_SLEEP);
authenc_request_complete(req, err);
}
@ -299,7 +324,7 @@ static int crypto_authenc_decrypt(struct aead_request *req)
if (err)
return err;
return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
return crypto_authenc_decrypt_tail(req, 0);
}
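
A minimal sketch of the completion convention the change relies on: when the second half of the operation is kicked off from the first half's asynchronous callback, in-flight notifications (-EINPROGRESS/-EBUSY) are swallowed so the original caller sees exactly one final completion. The error values and helpers below are illustrative, not the crypto API.

#include <stdio.h>

#define EINPROGRESS 115
#define EBUSY        16

/* Completion used when the request was issued directly by the caller. */
static void direct_complete(int err)
{
    printf("complete: %d\n", err);
}

/* Completion used when the request was chained from another callback. */
static void chained_complete(int err)
{
    if (err == -EINPROGRESS || err == -EBUSY)
        return;              /* the backend will call again with the final result */
    printf("complete: %d\n", err);
}

int main(void)
{
    chained_complete(-EINPROGRESS); /* swallowed */
    chained_complete(0);            /* reported once */
    direct_complete(0);             /* reported */
    return 0;
}
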
static int crypto_authenc_init_tfm(struct crypto_aead *tfm)

View File

@ -50,6 +50,7 @@ static int seqiv_aead_encrypt(struct aead_request *req)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
crypto_completion_t compl;
bool unaligned_info;
void *data;
u8 *info;
unsigned int ivsize = 8;
@ -79,8 +80,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
return err;
}
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
unaligned_info = !IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1);
if (unlikely(unaligned_info)) {
info = kmemdup(req->iv, ivsize, req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC);
@ -100,7 +102,7 @@ static int seqiv_aead_encrypt(struct aead_request *req)
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
err = crypto_aead_encrypt(subreq);
if (unlikely(info != req->iv))
if (unlikely(unaligned_info))
seqiv_aead_encrypt_complete2(req, err);
return err;
}

View File

@ -53,7 +53,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
struct pcc_data *data;
struct acpi_pcc_info *ctx = handler_context;
struct pcc_mbox_chan *pcc_chan;
static acpi_status ret;
acpi_status ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)

View File

@ -169,9 +169,12 @@ acpi_ns_walk_namespace(acpi_object_type type,
if (start_node == ACPI_ROOT_OBJECT) {
start_node = acpi_gbl_root_node;
if (!start_node) {
return_ACPI_STATUS(AE_NO_NAMESPACE);
}
}
/* Avoid walking the namespace if the StartNode is NULL */
if (!start_node) {
return_ACPI_STATUS(AE_NO_NAMESPACE);
}
/* Null child means "get first node" */

View File

@ -22,6 +22,7 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@ -528,6 +529,7 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
{
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
int flags = sync ? MF_ACTION_REQUIRED : 0;
char error_type[120];
bool queued = false;
int sec_sev, i;
char *p;
@ -541,9 +543,8 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
p = (char *)(err + 1);
for (i = 0; i < err->err_info_num; i++) {
struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
bool is_cache = err_info->type & CPER_ARM_CACHE_ERROR;
bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
const char *error_type = "unknown error";
/*
* The field (err_info->error_info & BIT(26)) is fixed to set to
@ -557,12 +558,15 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
continue;
}
if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
error_type = cper_proc_error_type_strs[err_info->type];
cper_bits_to_str(error_type, sizeof(error_type),
FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type),
cper_proc_error_type_strs,
ARRAY_SIZE(cper_proc_error_type_strs));
pr_warn_ratelimited(FW_WARN GHES_PFX
"Unhandled processor error type: %s\n",
error_type);
"Unhandled processor error type 0x%02x: %s%s\n",
err_info->type, error_type,
(err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : "");
p += err_info->length;
}
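
A standalone sketch of the bit-mask decoding the change switches to: the error-type field is treated as a mask rather than an array index, so each set bit contributes its name to the printed string, in the spirit of cper_bits_to_str() with cper_proc_error_type_strs. The bit positions and string table below are illustrative, not the CPER definitions.

#include <stdio.h>
#include <string.h>

static const char * const type_strs[] = {
    "cache error", "TLB error", "bus error", "micro-architectural error",
};

static void bits_to_str(char *buf, size_t len, unsigned int bits)
{
    buf[0] = '\0';
    for (size_t i = 0; i < sizeof(type_strs) / sizeof(type_strs[0]); i++) {
        if (!(bits & (1u << i)))
            continue;
        if (buf[0])
            strncat(buf, "|", len - strlen(buf) - 1);
        strncat(buf, type_strs[i], len - strlen(buf) - 1);
    }
}

int main(void)
{
    char buf[120];

    bits_to_str(buf, sizeof(buf), 0x5);  /* bits 0 and 2 set */
    printf("%s\n", buf);                 /* "cache error|bus error" */
    return 0;
}
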

View File

@ -1299,7 +1299,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
/* Are any of the regs PCC ?*/
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg) ||
CPC_IN_PCC(guaranteed_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;

View File

@ -54,7 +54,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration && (apic->uid == acpi_id)) {
if (apic->uid == acpi_id && (device_declaration || acpi_id < 255)) {
*apic_id = apic->local_apic_id;
return 0;
}

View File

@ -1370,7 +1370,7 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) {
do {
port = fwnode_get_next_child_node(fwnode, port);
port = acpi_get_next_subnode(fwnode, port);
/*
* The names of the port nodes begin with "port@"
* followed by the number of the port node and they also
@ -1388,13 +1388,13 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!port)
return NULL;
endpoint = fwnode_get_next_child_node(port, prev);
endpoint = acpi_get_next_subnode(port, prev);
while (!endpoint) {
port = fwnode_get_next_child_node(fwnode, port);
port = acpi_get_next_subnode(fwnode, port);
if (!port)
break;
if (is_acpi_graph_node(port, "port"))
endpoint = fwnode_get_next_child_node(port, NULL);
endpoint = acpi_get_next_subnode(port, NULL);
}
/*
@ -1622,6 +1622,7 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
fwnode_handle_put(port_fwnode);
return 0;
}

View File

@ -144,6 +144,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
if (!dev)
return -EPROBE_DEFER;
ahb = dev_get_drvdata(dev);
put_device(dev);
val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);

View File

@ -1827,16 +1827,18 @@ void pm_runtime_init(struct device *dev)
*/
void pm_runtime_reinit(struct device *dev)
{
if (!pm_runtime_enabled(dev)) {
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe) {
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 0;
spin_unlock_irq(&dev->power.lock);
if (dev->parent)
pm_runtime_put(dev->parent);
}
if (pm_runtime_enabled(dev))
return;
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe) {
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 0;
spin_unlock_irq(&dev->power.lock);
if (dev->parent)
pm_runtime_put(dev->parent);
}
/*
* Clear power.needs_force_resume in case it has been set by

View File

@ -331,7 +331,7 @@ static bool initialized;
* This default is used whenever the current disk size is unknown.
* [Now it is rather a minimum]
*/
#define MAX_DISK_SIZE 4 /* 3984 */
#define MAX_DISK_SIZE (PAGE_SIZE / 1024)
/*
* globals used by 'result()'

View File

@ -912,9 +912,9 @@ static void recv_work(struct work_struct *work)
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
nbd_config_put(nbd);
kfree(args);
}
@ -2147,12 +2147,13 @@ again:
ret = nbd_start_device(nbd);
out:
mutex_unlock(&nbd->config_lock);
if (!ret) {
set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
refcount_inc(&nbd->config_refs);
nbd_connect_reply(info, nbd->index);
}
mutex_unlock(&nbd->config_lock);
nbd_config_put(nbd);
if (put_dev)
nbd_put(nbd);

View File

@ -85,10 +85,14 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
dev_dbg(&dev->sbd.core, "%s:%u: %u sectors from %llu\n",
__func__, __LINE__, bio_sectors(iter.bio),
iter.bio->bi_iter.bi_sector);
if (gather)
memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
else
memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
offset += bvec.bv_len;
}
}

View File

@ -1440,9 +1440,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
goto out_alloc;
}
ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
GFP_KERNEL);
if (ret < 0) {
dev->clt_device_id = ida_alloc_max(&index_ida,
(1 << (MINORBITS - RNBD_PART_BITS)) - 1,
GFP_KERNEL);
if (dev->clt_device_id < 0) {
ret = dev->clt_device_id;
pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
pathname, sess->sessname, ret);
goto out_queues;
@ -1451,10 +1453,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
dev->pathname = kstrdup(pathname, GFP_KERNEL);
if (!dev->pathname) {
ret = -ENOMEM;
goto out_queues;
goto out_ida;
}
dev->clt_device_id = ret;
dev->sess = sess;
dev->access_mode = access_mode;
dev->nr_poll_queues = nr_poll_queues;
@ -1470,6 +1471,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
return dev;
out_ida:
ida_free(&index_ida, dev->clt_device_id);
out_queues:
kfree(dev->hw_queues);
out_alloc:

View File

@ -112,7 +112,7 @@ struct rnbd_clt_dev {
struct rnbd_queue *hw_queues;
u32 device_id;
/* local Idr index - used to track minor number allocations. */
u32 clt_device_id;
int clt_device_id;
struct mutex lock;
enum rnbd_clt_dev_state dev_state;
refcount_t refcount;

View File

@ -1709,7 +1709,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
{
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
struct ublk_io *io = NULL;
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
@ -1821,10 +1821,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
out:
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
return -EIOCBQUEUED;
__func__, cmd_op, tag, ret, io ? io->flags : 0);
return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
@ -1857,7 +1856,8 @@ fail_put:
return NULL;
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
/*
* Not necessary for async retry, but let's keep it simple and always
@ -1871,9 +1871,31 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
.addr = READ_ONCE(ub_src->addr)
};
WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
if (ret != -EIOCBQUEUED)
io_uring_cmd_done(cmd, ret, 0, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
/* well-implemented server won't run into unlocked */
if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb);
return -EIOCBQUEUED;
}
return ublk_ch_uring_cmd_local(cmd, issue_flags);
}
static inline bool ublk_check_ubuf_dir(const struct request *req,
int ubuf_dir)
{
@ -2928,10 +2950,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ub)
ublk_put_device(ub);
out:
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
return -EIOCBQUEUED;
return ret;
}
static const struct file_operations ublk_ctl_fops = {

View File

@ -49,7 +49,7 @@
#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}})
#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}})
#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}})
#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0xAD, 0x00, 0xb0}})
#define RTL_PATCH_SNIPPETS 0x01
#define RTL_PATCH_DUMMY_HEADER 0x02
@ -513,7 +513,6 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
{
struct rtl_epatch_header_v2 *hdr;
int rc;
u8 reg_val[2];
u8 key_id;
u32 num_sections;
struct rtl_section *section;
@ -528,14 +527,7 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
.len = btrtl_dev->fw_len - 7, /* Cut the tail */
};
rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
if (rc < 0)
return -EIO;
key_id = reg_val[0];
rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id);
btrtl_dev->key_id = key_id;
key_id = btrtl_dev->key_id;
hdr = rtl_iov_pull_data(&iov, sizeof(*hdr));
if (!hdr)
@ -1049,6 +1041,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
u16 hci_rev, lmp_subver;
u8 hci_ver, lmp_ver, chip_type = 0;
int ret;
int rc;
u8 key_id;
u8 reg_val[2];
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
@ -1159,6 +1153,14 @@ next:
goto err_free;
}
rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
if (rc < 0)
goto err_free;
key_id = reg_val[0];
btrtl_dev->key_id = key_id;
rtl_dev_info(hdev, "%s: key id %u", __func__, key_id);
btrtl_dev->fw_len = -EIO;
if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) {
snprintf(fw_name, sizeof(fw_name), "%s_v2.bin",
@ -1181,7 +1183,7 @@ next:
goto err_free;
}
if (btrtl_dev->ic_info->cfg_name) {
if (btrtl_dev->ic_info->cfg_name && !btrtl_dev->key_id) {
if (postfix) {
snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
btrtl_dev->ic_info->cfg_name, postfix);

Some files were not shown because too many files have changed in this diff.