This is the 6.1.135 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmgLS1sACgkQONu9yGCS
 aT64dRAAuNws/uf11y3lncjb9gzPE0MZOuTnCrcuqxiSFYatngH3lqFhtqW6o3n/
 4izvRlH245yQsRitFZBiu06I1XiluTc7vPxCLQJuI3pa4W2DpGLv7FilFhmRVrrS
 7ehjlLD5JcHudmscV2LK7Gru1ClQwRH2eBOndNA2bVijWCXPM9ohE88ovPeogmVh
 oOnwkPWcsrsVRocTdu4SrjCGL9UZTv7QDPurEC81LHLtoIB8vK11QYsW4zf9rhDa
 TpSOGtkSSFSpuT/ZXhYesBdDwYibeC+drb2WszSPaSpFAXDuWddnOFVbSCe/im2e
 f/MhPDBfZ+c871sHWGUGCJA3otNOapO7STGD3G0dsKbS2stxmFU+HBnmY5v3WKEC
 lRwnE2OVZ6FrQ3q/aziYfyv1W6MdY2hZSUaHb77YiYUwEDDJjGYviHNtJCFP4VbD
 +wnTjI9WTH6LMM44h/XpAYDnMMPC5uou77GLir3l5hSYpjj5LrkphX+VYsobs6rB
 ShXUAux4go/+SQKETerw7M7mhnso7ghKZ7Clr87aginYYuZLTnAzwRi+1ZUbHqSd
 AjLIcXCR52qM9qE7PO4r1RkCqrgsPX1pgxZOyvfRoe/aA3iyvF1LaZS7nGKMu9g5
 5T/nnDY+BDdVmK9peXunL/2Qtafl9kwKVJ1AT+NAwTwLYR0L4AM=
 =kLlg
 -----END PGP SIGNATURE-----

Merge tag 'v6.1.135' into v6.1/standard/base

This is the 6.1.135 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmgLS1sACgkQONu9yGCS
# aT64dRAAuNws/uf11y3lncjb9gzPE0MZOuTnCrcuqxiSFYatngH3lqFhtqW6o3n/
# 4izvRlH245yQsRitFZBiu06I1XiluTc7vPxCLQJuI3pa4W2DpGLv7FilFhmRVrrS
# 7ehjlLD5JcHudmscV2LK7Gru1ClQwRH2eBOndNA2bVijWCXPM9ohE88ovPeogmVh
# oOnwkPWcsrsVRocTdu4SrjCGL9UZTv7QDPurEC81LHLtoIB8vK11QYsW4zf9rhDa
# TpSOGtkSSFSpuT/ZXhYesBdDwYibeC+drb2WszSPaSpFAXDuWddnOFVbSCe/im2e
# f/MhPDBfZ+c871sHWGUGCJA3otNOapO7STGD3G0dsKbS2stxmFU+HBnmY5v3WKEC
# lRwnE2OVZ6FrQ3q/aziYfyv1W6MdY2hZSUaHb77YiYUwEDDJjGYviHNtJCFP4VbD
# +wnTjI9WTH6LMM44h/XpAYDnMMPC5uou77GLir3l5hSYpjj5LrkphX+VYsobs6rB
# ShXUAux4go/+SQKETerw7M7mhnso7ghKZ7Clr87aginYYuZLTnAzwRi+1ZUbHqSd
# AjLIcXCR52qM9qE7PO4r1RkCqrgsPX1pgxZOyvfRoe/aA3iyvF1LaZS7nGKMu9g5
# 5T/nnDY+BDdVmK9peXunL/2Qtafl9kwKVJ1AT+NAwTwLYR0L4AM=
# =kLlg
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 25 Apr 2025 04:44:11 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
commit ee3c71357c
Author: Bruce Ashfield
Date:   2025-05-01 22:49:06 -04:00

279 changed files with 2906 additions and 1418 deletions

View File

@@ -4846,6 +4846,7 @@ S: Maintained
 F: Documentation/admin-guide/module-signing.rst
 F: certs/
 F: scripts/sign-file.c
+F: scripts/ssl-common.h
 F: tools/certs/
 
 CFAG12864B LCD DRIVER

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 134
+SUBLEVEL = 135
 EXTRAVERSION =
 NAME = Curry Ramen
@@ -1075,6 +1075,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 # Require designated initializers for all marked structures
 KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
 
+# Ensure compilers do not transform certain loops into calls to wcslen()
+KBUILD_CFLAGS += -fno-builtin-wcslen
+
 # change __FILE__ to the relative path from the srctree
 KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)

View File

@@ -1247,8 +1247,7 @@
         };
 
         pwm0: pwm@1401e000 {
-            compatible = "mediatek,mt8173-disp-pwm",
-                         "mediatek,mt6595-disp-pwm";
+            compatible = "mediatek,mt8173-disp-pwm";
             reg = <0 0x1401e000 0 0x1000>;
             #pwm-cells = <2>;
             clocks = <&mmsys CLK_MM_DISP_PWM026M>,
@@ -1258,8 +1257,7 @@
         };
 
         pwm1: pwm@1401f000 {
-            compatible = "mediatek,mt8173-disp-pwm",
-                         "mediatek,mt6595-disp-pwm";
+            compatible = "mediatek,mt8173-disp-pwm";
             reg = <0 0x1401f000 0 0x1000>;
             #pwm-cells = <2>;
             clocks = <&mmsys CLK_MM_DISP_PWM126M>,

View File

@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A76 0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1 0xD0C
 #define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A76AE 0xD0E
 #define ARM_CPU_PART_NEOVERSE_V1 0xD40
 #define ARM_CPU_PART_CORTEX_A78 0xD41
 #define ARM_CPU_PART_CORTEX_A78AE 0xD42
@@ -119,6 +120,7 @@
 #define QCOM_CPU_PART_KRYO 0x200
 #define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
 #define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
 #define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
 #define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
@@ -151,6 +153,7 @@
 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
 #define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
 #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
@@ -188,6 +191,7 @@
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 #define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
 #define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
 #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)

View File

@@ -56,11 +56,13 @@ extern void fpsimd_signal_preserve_current_state(void);
 extern void fpsimd_preserve_current_state(void);
 extern void fpsimd_restore_current_state(void);
 extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
+extern void fpsimd_kvm_prepare(void);
 
 extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
                                      void *sve_state, unsigned int sve_vl,
-                                     void *za_state, unsigned int sme_vl,
-                                     u64 *svcr);
+                                     void *za_state, unsigned int sme_vl,
+                                     u64 *svcr, enum fp_type *type,
+                                     enum fp_type to_save);
 
 extern void fpsimd_flush_task_state(struct task_struct *target);
 extern void fpsimd_save_and_flush_cpu_state(void);

View File

@@ -67,6 +67,7 @@ enum kvm_mode kvm_get_mode(void);
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
 extern unsigned int kvm_sve_max_vl;
+extern unsigned int kvm_host_sve_max_vl;
 int kvm_arm_init_sve(void);
 
 u32 __attribute_const__ kvm_target_cpu(void);
@@ -309,8 +310,18 @@ struct vcpu_reset_state {
 struct kvm_vcpu_arch {
     struct kvm_cpu_context ctxt;
 
-    /* Guest floating point state */
+    /*
+     * Guest floating point state
+     *
+     * The architecture has two main floating point extensions,
+     * the original FPSIMD and SVE. These have overlapping
+     * register views, with the FPSIMD V registers occupying the
+     * low 128 bits of the SVE Z registers. When the core
+     * floating point code saves the register state of a task it
+     * records which view it saved in fp_type.
+     */
     void *sve_state;
+    enum fp_type fp_type;
     unsigned int sve_max_vl;
     u64 svcr;
@@ -320,7 +331,6 @@ struct kvm_vcpu_arch {
     /* Values of trap registers for the guest. */
     u64 hcr_el2;
     u64 mdcr_el2;
-    u64 cptr_el2;
 
     /* Values of trap registers for the host before guest entry. */
     u64 mdcr_el2_host;
@@ -370,7 +380,6 @@ struct kvm_vcpu_arch {
     struct kvm_guest_debug_arch vcpu_debug_state;
     struct kvm_guest_debug_arch external_debug_state;
 
-    struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
     struct task_struct *parent_task;
 
     struct {
@@ -547,10 +556,6 @@ struct kvm_vcpu_arch {
 /* Save TRBE context if active */
 #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
 
-/* SVE enabled for host EL0 */
-#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
-/* SME enabled for EL0 */
-#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
 /* Physical CPU not in supported_cpus */
 #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2))
 /* WFIT instruction trapped */

View File

@@ -122,5 +122,6 @@ extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
+extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
 
 #endif /* __ARM64_KVM_HYP_H__ */

View File

@@ -122,6 +122,12 @@ enum vec_type {
     ARM64_VEC_MAX,
 };
 
+enum fp_type {
+    FP_STATE_CURRENT,   /* Save based on current task state. */
+    FP_STATE_FPSIMD,
+    FP_STATE_SVE,
+};
+
 struct cpu_context {
     unsigned long x19;
     unsigned long x20;
@@ -152,6 +158,7 @@ struct thread_struct {
         struct user_fpsimd_state fpsimd_state;
     } uw;
 
+    enum fp_type fp_type;   /* registers FPSIMD or SVE? */
     unsigned int fpsimd_cpu;
     void *sve_state;        /* SVE registers, if any */
     void *za_state;         /* ZA register, if any */

View File

@@ -96,7 +96,6 @@ enum mitigation_state arm64_get_meltdown_state(void);
 
 enum mitigation_state arm64_get_spectre_bhb_state(void);
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
-u8 spectre_bhb_loop_affected(int scope);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SPECTRE_H */

View File

@@ -125,6 +125,8 @@ struct fpsimd_last_state_struct {
     u64 *svcr;
     unsigned int sve_vl;
     unsigned int sme_vl;
+    enum fp_type *fp_type;
+    enum fp_type to_save;
 };
 
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -330,15 +332,6 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
  * The task can execute SVE instructions while in userspace without
  * trapping to the kernel.
  *
- * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
- * corresponding Zn), P0-P15 and FFR are encoded in
- * task->thread.sve_state, formatted appropriately for vector
- * length task->thread.sve_vl or, if SVCR.SM is set,
- * task->thread.sme_vl.
- *
- * task->thread.sve_state must point to a valid buffer at least
- * sve_state_size(task) bytes in size.
- *
  * During any syscall, the kernel may optionally clear TIF_SVE and
  * discard the vector state except for the FPSIMD subset.
  *
@@ -348,7 +341,15 @@
  * do_sve_acc() to be called, which does some preparation and then
  * sets TIF_SVE.
  *
- * When stored, FPSIMD registers V0-V31 are encoded in
+ * During any syscall, the kernel may optionally clear TIF_SVE and
+ * discard the vector state except for the FPSIMD subset.
+ *
+ * The data will be stored in one of two formats:
+ *
+ * * FPSIMD only - FP_STATE_FPSIMD:
+ *
+ *    When the FPSIMD only state stored task->thread.fp_type is set to
+ *    FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in
  * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
  * logically zero but not stored anywhere; P0-P15 and FFR are not
  * stored and have unspecified values from userspace's point of
@@ -356,7 +357,23 @@
  * but userspace is discouraged from relying on this.
 *
 * task->thread.sve_state does not need to be non-NULL, valid or any
- * particular size: it must not be dereferenced.
+ * particular size: it must not be dereferenced and any data stored
+ * there should be considered stale and not referenced.
+ *
+ * * SVE state - FP_STATE_SVE:
+ *
+ *    When the full SVE state is stored task->thread.fp_type is set to
+ *    FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the
+ *    corresponding Zn), P0-P15 and FFR are encoded in in
+ *    task->thread.sve_state, formatted appropriately for vector
+ *    length task->thread.sve_vl or, if SVCR.SM is set,
+ *    task->thread.sme_vl. The storage for the vector registers in
+ *    task->thread.uw.fpsimd_state should be ignored.
+ *
+ *    task->thread.sve_state must point to a valid buffer at least
+ *    sve_state_size(task) bytes in size. The data stored in
+ *    task->thread.uw.fpsimd_state.vregs should be considered stale
+ *    and not referenced.
 *
 * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 * irrespective of whether TIF_SVE is clear or set, since these are
@@ -404,12 +421,15 @@ static void task_fpsimd_load(void)
         }
     }
 
-    if (restore_sve_regs)
+    if (restore_sve_regs) {
+        WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
         sve_load_state(sve_pffr(&current->thread),
                        &current->thread.uw.fpsimd_state.fpsr,
                        restore_ffr);
-    else
+    } else {
+        WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD);
         fpsimd_load_state(&current->thread.uw.fpsimd_state);
+    }
 }
 
 /*
@@ -419,8 +439,8 @@ static void task_fpsimd_load(void)
 * last, if KVM is involved this may be the guest VM context rather
 * than the host thread for the VM pointed to by current. This means
 * that we must always reference the state storage via last rather
- * than via current, other than the TIF_ flags which KVM will
- * carefully maintain for us.
+ * than via current, if we are saving KVM state then it will have
+ * ensured that the type of registers to save is set in last->to_save.
 */
 static void fpsimd_save(void)
 {
@@ -437,7 +457,8 @@ static void fpsimd_save(void)
     if (test_thread_flag(TIF_FOREIGN_FPSTATE))
         return;
 
-    if (test_thread_flag(TIF_SVE)) {
+    if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) ||
+        last->to_save == FP_STATE_SVE) {
         save_sve_regs = true;
         save_ffr = true;
         vl = last->sve_vl;
@@ -474,8 +495,10 @@
         sve_save_state((char *)last->sve_state +
                        sve_ffr_offset(vl),
                        &last->st->fpsr, save_ffr);
+        *last->fp_type = FP_STATE_SVE;
     } else {
         fpsimd_save_state(last->st);
+        *last->fp_type = FP_STATE_FPSIMD;
     }
 }
 
@@ -851,8 +874,10 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
     fpsimd_flush_task_state(task);
 
     if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
-        thread_sm_enabled(&task->thread))
+        thread_sm_enabled(&task->thread)) {
         sve_to_fpsimd(task);
+        task->thread.fp_type = FP_STATE_FPSIMD;
+    }
 
     if (system_supports_sme()) {
         if (type == ARM64_VEC_SME ||
@@ -1383,6 +1408,7 @@ static void sve_init_regs(void)
         fpsimd_bind_task_to_cpu();
     } else {
         fpsimd_to_sve(current);
+        current->thread.fp_type = FP_STATE_SVE;
         fpsimd_flush_task_state(current);
     }
 }
@@ -1612,6 +1638,8 @@ void fpsimd_flush_thread(void)
         current->thread.svcr = 0;
     }
 
+    current->thread.fp_type = FP_STATE_FPSIMD;
+
     put_cpu_fpsimd_context();
     kfree(sve_state);
     kfree(za_state);
@@ -1660,6 +1688,8 @@ static void fpsimd_bind_task_to_cpu(void)
     last->sve_vl = task_get_sve_vl(current);
     last->sme_vl = task_get_sme_vl(current);
     last->svcr = &current->thread.svcr;
+    last->fp_type = &current->thread.fp_type;
+    last->to_save = FP_STATE_CURRENT;
     current->thread.fpsimd_cpu = smp_processor_id();
 
     /*
@@ -1683,7 +1713,8 @@
 void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
                               unsigned int sve_vl, void *za_state,
-                              unsigned int sme_vl, u64 *svcr)
+                              unsigned int sme_vl, u64 *svcr,
+                              enum fp_type *type, enum fp_type to_save)
 {
     struct fpsimd_last_state_struct *last =
         this_cpu_ptr(&fpsimd_last_state);
@@ -1697,6 +1728,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
     last->za_state = za_state;
     last->sve_vl = sve_vl;
     last->sme_vl = sme_vl;
+    last->fp_type = type;
+    last->to_save = to_save;
 }
 
 /*

View File

@@ -331,6 +331,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
         clear_tsk_thread_flag(dst, TIF_SME);
     }
 
+    dst->thread.fp_type = FP_STATE_FPSIMD;
+
     /* clear any pending asynchronous tag fault raised by the parent */
     clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

View File

@@ -857,52 +857,86 @@ static unsigned long system_bhb_mitigations;
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
-u8 spectre_bhb_loop_affected(int scope)
+static bool is_spectre_bhb_safe(int scope)
+{
+    static const struct midr_range spectre_bhb_safe_list[] = {
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
+        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
+        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+        {},
+    };
+    static bool all_safe = true;
+
+    if (scope != SCOPE_LOCAL_CPU)
+        return all_safe;
+
+    if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
+        return true;
+
+    all_safe = false;
+
+    return false;
+}
+
+static u8 spectre_bhb_loop_affected(void)
 {
     u8 k = 0;
-    static u8 max_bhb_k;
 
-    if (scope == SCOPE_LOCAL_CPU) {
-        static const struct midr_range spectre_bhb_k32_list[] = {
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
-            MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
-            MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
-            {},
-        };
-        static const struct midr_range spectre_bhb_k24_list[] = {
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
-            MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
-            {},
-        };
-        static const struct midr_range spectre_bhb_k11_list[] = {
-            MIDR_ALL_VERSIONS(MIDR_AMPERE1),
-            {},
-        };
-        static const struct midr_range spectre_bhb_k8_list[] = {
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-            MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-            {},
-        };
-
-        if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
-            k = 32;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
-            k = 24;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
-            k = 11;
-        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
-            k = 8;
-
-        max_bhb_k = max(max_bhb_k, k);
-    } else {
-        k = max_bhb_k;
-    }
+    static const struct midr_range spectre_bhb_k132_list[] = {
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+    };
+    static const struct midr_range spectre_bhb_k38_list[] = {
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+    };
+    static const struct midr_range spectre_bhb_k32_list[] = {
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+        {},
+    };
+    static const struct midr_range spectre_bhb_k24_list[] = {
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
+        {},
+    };
+    static const struct midr_range spectre_bhb_k11_list[] = {
+        MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+        {},
+    };
+    static const struct midr_range spectre_bhb_k8_list[] = {
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+        {},
+    };
+
+    if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list))
+        k = 132;
+    else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list))
+        k = 38;
+    else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+        k = 32;
+    else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+        k = 24;
+    else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+        k = 11;
+    else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+        k = 8;
 
     return k;
 }
@@ -928,29 +962,13 @@
     }
 }
 
-static bool is_spectre_bhb_fw_affected(int scope)
+static bool has_spectre_bhb_fw_mitigation(void)
 {
-    static bool system_affected;
     enum mitigation_state fw_state;
     bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
-    static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-        {},
-    };
-    bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
-                                             spectre_bhb_firmware_mitigated_list);
-
-    if (scope != SCOPE_LOCAL_CPU)
-        return system_affected;
 
     fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
-    if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
-        system_affected = true;
-        return true;
-    }
-
-    return false;
+    return has_smccc && fw_state == SPECTRE_MITIGATED;
 }
 
 static bool supports_ecbhb(int scope)
@@ -966,6 +984,8 @@ static bool supports_ecbhb(int scope)
                ID_AA64MMFR1_EL1_ECBHB_SHIFT);
 }
 
+static u8 max_bhb_k;
+
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
                              int scope)
 {
@@ -974,16 +994,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
     if (supports_csv2p3(scope))
         return false;
 
-    if (supports_clearbhb(scope))
-        return true;
-
-    if (spectre_bhb_loop_affected(scope))
-        return true;
+    if (is_spectre_bhb_safe(scope))
+        return false;
 
-    if (is_spectre_bhb_fw_affected(scope))
-        return true;
+    /*
+     * At this point the core isn't known to be "safe" so we're going to
+     * assume it's vulnerable. We still need to update `max_bhb_k` though,
+     * but only if we aren't mitigating with clearbhb though.
+     */
+    if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
+        max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
 
-    return false;
+    return true;
 }
 
 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -1017,7 +1039,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 {
     bp_hardening_cb_t cpu_cb;
-    enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+    enum mitigation_state state = SPECTRE_VULNERABLE;
     struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
 
     if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1043,7 +1065,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
         this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
         state = SPECTRE_MITIGATED;
         set_bit(BHB_INSN, &system_bhb_mitigations);
-    } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+    } else if (spectre_bhb_loop_affected()) {
         /*
          * Ensure KVM uses the indirect vector which will have the
          * branchy-loop added. A57/A72-r0 will already have selected
@@ -1056,32 +1078,29 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
         this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
         state = SPECTRE_MITIGATED;
         set_bit(BHB_LOOP, &system_bhb_mitigations);
-    } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
-        fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
-        if (fw_state == SPECTRE_MITIGATED) {
-            /*
-             * Ensure KVM uses one of the spectre bp_hardening
-             * vectors. The indirect vector doesn't include the EL3
-             * call, so needs upgrading to
-             * HYP_VECTOR_SPECTRE_INDIRECT.
-             */
-            if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
-                data->slot += 1;
-
-            this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
-
-            /*
-             * The WA3 call in the vectors supersedes the WA1 call
-             * made during context-switch. Uninstall any firmware
-             * bp_hardening callback.
-             */
-            cpu_cb = spectre_v2_get_sw_mitigation_cb();
-            if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
-                __this_cpu_write(bp_hardening_data.fn, NULL);
-
-            state = SPECTRE_MITIGATED;
-            set_bit(BHB_FW, &system_bhb_mitigations);
-        }
+    } else if (has_spectre_bhb_fw_mitigation()) {
+        /*
+         * Ensure KVM uses one of the spectre bp_hardening
+         * vectors. The indirect vector doesn't include the EL3
+         * call, so needs upgrading to
+         * HYP_VECTOR_SPECTRE_INDIRECT.
+         */
+        if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
+            data->slot += 1;
+
+        this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+        /*
+         * The WA3 call in the vectors supersedes the WA1 call
+         * made during context-switch. Uninstall any firmware
+         * bp_hardening callback.
+         */
+        cpu_cb = spectre_v2_get_sw_mitigation_cb();
+        if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
+            __this_cpu_write(bp_hardening_data.fn, NULL);
+
+        state = SPECTRE_MITIGATED;
+        set_bit(BHB_FW, &system_bhb_mitigations);
     }
 
     update_mitigation_state(&spectre_bhb_state, state);
@@ -1115,7 +1134,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
 {
     u8 rd;
     u32 insn;
-    u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
 
     BUG_ON(nr_inst != 1); /* MOV -> MOV */
@@ -1124,7 +1142,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
     insn = le32_to_cpu(*origptr);
     rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
-    insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+    insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
                                      AARCH64_INSN_VARIANT_64BIT,
                                      AARCH64_INSN_MOVEWIDE_ZERO);
     *updptr++ = cpu_to_le32(insn);

View File

@@ -917,6 +917,7 @@ static int sve_set_common(struct task_struct *target,
         clear_tsk_thread_flag(target, TIF_SVE);
         if (type == ARM64_VEC_SME)
             fpsimd_force_sync_to_sve(target);
+        target->thread.fp_type = FP_STATE_FPSIMD;
         goto out;
     }
 
@@ -939,6 +940,7 @@ static int sve_set_common(struct task_struct *target,
     if (!target->thread.sve_state) {
         ret = -ENOMEM;
         clear_tsk_thread_flag(target, TIF_SVE);
+        target->thread.fp_type = FP_STATE_FPSIMD;
         goto out;
     }
 
@@ -952,6 +954,7 @@ static int sve_set_common(struct task_struct *target,
     fpsimd_sync_to_sve(target);
     if (type == ARM64_VEC_SVE)
         set_tsk_thread_flag(target, TIF_SVE);
+    target->thread.fp_type = FP_STATE_SVE;
 
     BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
     start = SVE_PT_SVE_OFFSET;

View File

@@ -207,6 +207,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
     __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
 
     clear_thread_flag(TIF_SVE);
+    current->thread.fp_type = FP_STATE_FPSIMD;
 
     /* load the hardware registers from the fpsimd_state structure */
     if (!err)
@@ -297,6 +298,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
     if (sve.head.size <= sizeof(*user->sve)) {
         clear_thread_flag(TIF_SVE);
         current->thread.svcr &= ~SVCR_SM_MASK;
+        current->thread.fp_type = FP_STATE_FPSIMD;
         goto fpsimd_only;
     }
 
@@ -332,6 +334,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
         current->thread.svcr |= SVCR_SM_MASK;
     else
         set_thread_flag(TIF_SVE);
+    current->thread.fp_type = FP_STATE_SVE;
 
 fpsimd_only:
     /* copy the FP and status/control registers */
@@ -937,9 +940,11 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
              * FPSIMD register state - flush the saved FPSIMD
              * register state in case it gets loaded.
              */
-            if (current->thread.svcr & SVCR_SM_MASK)
+            if (current->thread.svcr & SVCR_SM_MASK) {
                 memset(&current->thread.uw.fpsimd_state, 0,
                        sizeof(current->thread.uw.fpsimd_state));
+                current->thread.fp_type = FP_STATE_FPSIMD;
+            }
 
             current->thread.svcr &= ~(SVCR_ZA_MASK |
                                       SVCR_SM_MASK);

View File

@@ -371,7 +371,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
     if (err)
         return err;
 
-    return kvm_share_hyp(vcpu, vcpu + 1);
+    err = kvm_share_hyp(vcpu, vcpu + 1);
+    if (err)
+        kvm_vgic_vcpu_destroy(vcpu);
+
+    return err;
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -1230,7 +1234,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
     }
 
     vcpu_reset_hcr(vcpu);
-    vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
 
     /*
      * Handle the "start in power-off" case.

View File

@@ -49,8 +49,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
     if (ret)
         return ret;
 
-    vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
-
     /*
      * We need to keep current's task_struct pinned until its data has been
      * unshared with the hypervisor to make sure it is not re-used by the
@@ -75,36 +73,20 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
     BUG_ON(!current->mm);
-    BUG_ON(test_thread_flag(TIF_SVE));
 
     if (!system_supports_fpsimd())
         return;
 
-    vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
-
-    vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
-    if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
-        vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
-
     /*
-     * We don't currently support SME guests but if we leave
-     * things in streaming mode then when the guest starts running
-     * FPSIMD or SVE code it may generate SME traps so as a
-     * special case if we are in streaming mode we force the host
-     * state to be saved now and exit streaming mode so that we
-     * don't have to handle any SME traps for valid guest
-     * operations. Do this for ZA as well for now for simplicity.
+     * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
+     * that the host kernel is responsible for restoring this state upon
+     * return to userspace, and the hyp code doesn't need to save anything.
+     *
+     * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
+     * that PSTATE.{SM,ZA} == {0,0}.
      */
-    if (system_supports_sme()) {
-        vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
-        if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
-            vcpu_set_flag(vcpu, HOST_SME_ENABLED);
-
-        if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
-            vcpu->arch.fp_state = FP_STATE_FREE;
-            fpsimd_save_and_flush_cpu_state();
-        }
-    }
+    fpsimd_save_and_flush_cpu_state();
+    vcpu->arch.fp_state = FP_STATE_FREE;
 }
 
 /*
@@ -129,9 +111,16 @@ void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
 */
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 {
+    enum fp_type fp_type;
+
     WARN_ON_ONCE(!irqs_disabled());
 
     if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+        if (vcpu_has_sve(vcpu))
+            fp_type = FP_STATE_SVE;
+        else
+            fp_type = FP_STATE_FPSIMD;
+
         /*
          * Currently we do not support SME guests so SVCR is
          * always 0 and we just need a variable to point to.
@@ -139,10 +128,10 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
         fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
                                  vcpu->arch.sve_state,
                                  vcpu->arch.sve_max_vl,
-                                 NULL, 0, &vcpu->arch.svcr);
+                                 NULL, 0, &vcpu->arch.svcr,
+                                 &vcpu->arch.fp_type, fp_type);
 
         clear_thread_flag(TIF_FOREIGN_FPSTATE);
-        update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
     }
 }
 
@@ -158,48 +147,19 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
     local_irq_save(flags);
 
-    /*
-     * If we have VHE then the Hyp code will reset CPACR_EL1 to
-     * CPACR_EL1_DEFAULT and we need to reenable SME.
-     */
-    if (has_vhe() && system_supports_sme()) {
-        /* Also restore EL0 state seen on entry */
-        if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
-            sysreg_clear_set(CPACR_EL1, 0,
-                             CPACR_EL1_SMEN_EL0EN |
-                             CPACR_EL1_SMEN_EL1EN);
-        else
-            sysreg_clear_set(CPACR_EL1,
-                             CPACR_EL1_SMEN_EL0EN,
-                             CPACR_EL1_SMEN_EL1EN);
-    }
-
     if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
-        if (vcpu_has_sve(vcpu)) {
-            __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
-
-            /* Restore the VL that was saved when bound to the CPU */
-            if (!has_vhe())
-                sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
-                                       SYS_ZCR_EL1);
-        }
-
-        fpsimd_save_and_flush_cpu_state();
-    } else if (has_vhe() && system_supports_sve()) {
         /*
-         * The FPSIMD/SVE state in the CPU has not been touched, and we
-         * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
-         * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
-         * for EL0. To avoid spurious traps, restore the trap state
-         * seen by kvm_arch_vcpu_load_fp():
+         * Flush (save and invalidate) the fpsimd/sve state so that if
+         * the host tries to use fpsimd/sve, it's not using stale data
+         * from the guest.
+         *
+         * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
+         * context unconditionally, in both nVHE and VHE. This allows
+         * the kernel to restore the fpsimd/sve state, including ZCR_EL1
+         * when needed.
          */
-        if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
-            sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
-        else
-            sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
+        fpsimd_save_and_flush_cpu_state();
     }
 
-    update_thread_flag(TIF_SVE, 0);
-
     local_irq_restore(flags);
 }

View File

@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
 alternative_else_nop_endif
     mrs x1, isr_el1
     cbz x1,  1f
+
+    // Ensure that __guest_enter() always provides a context
+    // synchronization event so that callers don't need ISBs for anything
+    // that would usually be synchonized by the ERET.
+    isb
     mov x0, #ARM_EXCEPTION_IRQ
     ret

View File

@@ -167,13 +167,68 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
     write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
 }
 
+static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+    u64 zcr_el1, zcr_el2;
+
+    if (!guest_owns_fp_regs(vcpu))
+        return;
+
+    if (vcpu_has_sve(vcpu)) {
+        zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+
+        write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+        zcr_el1 = __vcpu_sys_reg(vcpu, ZCR_EL1);
+        write_sysreg_el1(zcr_el1, SYS_ZCR);
+    }
+}
+
+static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
+{
+    u64 zcr_el1, zcr_el2;
+
+    if (!guest_owns_fp_regs(vcpu))
+        return;
+
+    /*
+     * When the guest owns the FP regs, we know that guest+hyp traps for
+     * any FPSIMD/SVE/SME features exposed to the guest have been disabled
+     * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
+     * prior to __guest_entry(). As __guest_entry() guarantees a context
+     * synchronization event, we don't need an ISB here to avoid taking
+     * traps for anything that was exposed to the guest.
+     */
+    if (vcpu_has_sve(vcpu)) {
+        zcr_el1 = read_sysreg_el1(SYS_ZCR);
+        __vcpu_sys_reg(vcpu, ZCR_EL1) = zcr_el1;
+
+        /*
+         * The guest's state is always saved using the guest's max VL.
+         * Ensure that the host has the guest's max VL active such that
+         * the host can save the guest's state lazily, but don't
+         * artificially restrict the host to the guest's max VL.
+         */
+        if (has_vhe()) {
+            zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+            write_sysreg_el2(zcr_el2, SYS_ZCR);
+        } else {
+            zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
+            write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+            zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
+            write_sysreg_el1(zcr_el1, SYS_ZCR);
+        }
+    }
+}
+
 /*
  * We trap the first access to the FP/SIMD to save the host context and
  * restore the guest context lazily.
  * If FP/SIMD is not implemented, handle the trap and inject an undefined
  * instruction exception to the guest. Similarly for trapped SVE accesses.
  */
-static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
     bool sve_guest;
     u8 esr_ec;
@@ -207,10 +262,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
     }
     isb();
 
-    /* Write out the host state if it's in the registers */
-    if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
-        __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-
     /* Restore the guest state */
     if (sve_guest)
         __hyp_sve_restore_guest(vcpu);
@@ -335,7 +386,7 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
     return true;
 }
 
-static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
     if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
         handle_tx2_tvm(vcpu))
@@ -351,7 +402,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
     return false;
 }
 
-static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
     if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
         __vgic_v3_perform_cpuif_access(vcpu) == 1)
@@ -360,19 +411,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
     return false;
 }
 
-static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
+                                               u64 *exit_code)
 {
     if (!__populate_fault_info(vcpu))
         return true;
 
     return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
-    __alias(kvm_hyp_handle_memory_fault);
-static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
-    __alias(kvm_hyp_handle_memory_fault);
+#define kvm_hyp_handle_iabt_low     kvm_hyp_handle_memory_fault
+#define kvm_hyp_handle_watchpt_low  kvm_hyp_handle_memory_fault
 
-static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
     if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
         return true;
@@ -402,23 +452,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
 
-static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
-
-static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
-
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
  * Returns true if the hypervisor handled the exit, and control should go back
  * to the guest, or false if it hasn't.
  */
-static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
+                                       const exit_handler_fn *handlers)
 {
-    const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
-    exit_handler_fn fn;
-
-    fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+    exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
 
     if (fn)
         return fn(vcpu, exit_code);
@@ -448,20 +491,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code
  * the guest, false when we should restore the host state and return to the
  * main run loop.
  */
-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
+                                      const exit_handler_fn *handlers)
 {
-    /*
-     * Save PSTATE early so that we can evaluate the vcpu mode
-     * early on.
-     */
-    synchronize_vcpu_pstate(vcpu, exit_code);
-
-    /*
-     * Check whether we want to repaint the state one way or
-     * another.
-     */
-    early_exit_filter(vcpu, exit_code);
-
     if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
         vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -491,7 +523,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
         goto exit;
 
     /* Check if there's an exit handler and allow it to handle the exit. */
-    if (kvm_hyp_handle_exit(vcpu, exit_code))
+    if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
         goto guest;
 exit:
     /* Return to the host kernel and handle the exit */

View File

@@ -5,6 +5,7 @@
  */
 
 #include <hyp/adjust_pc.h>
+#include <hyp/switch.h>
 
 #include <asm/pgtable-types.h>
 #include <asm/kvm_asm.h>
@@ -25,7 +26,9 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
     DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
 
+    fpsimd_lazy_switch_to_guest(kern_hyp_va(vcpu));
     cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+    fpsimd_lazy_switch_to_host(kern_hyp_va(vcpu));
 }
 
 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
@@ -285,11 +288,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
     case ESR_ELx_EC_SMC64:
         handle_host_smc(host_ctxt);
         break;
-    case ESR_ELx_EC_SVE:
-        sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
-        isb();
-        sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
-        break;
     case ESR_ELx_EC_IABT_LOW:
     case ESR_ELx_EC_DABT_LOW:
         handle_host_mem_abort(host_ctxt);

View File

@@ -9,6 +9,8 @@
 #include <nvhe/fixed_config.h>
 #include <nvhe/trap_handler.h>
 
+unsigned int kvm_host_sve_max_vl;
+
 /*
  * Set trap register values based on features in ID_AA64PFR0.
  */
@@ -17,7 +19,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
     const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
     u64 hcr_set = HCR_RW;
     u64 hcr_clear = 0;
-    u64 cptr_set = 0;
 
     /* Protected KVM does not support AArch32 guests. */
     BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
@@ -44,16 +45,10 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
     /* Trap AMU */
     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
         hcr_clear |= HCR_AMVOFFEN;
-        cptr_set |= CPTR_EL2_TAM;
     }
 
-    /* Trap SVE */
-    if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
-        cptr_set |= CPTR_EL2_TZ;
-
     vcpu->arch.hcr_el2 |= hcr_set;
     vcpu->arch.hcr_el2 &= ~hcr_clear;
-    vcpu->arch.cptr_el2 |= cptr_set;
 }
 
 /*
@@ -83,7 +78,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
     const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
     u64 mdcr_set = 0;
     u64 mdcr_clear = 0;
-    u64 cptr_set = 0;
 
     /* Trap/constrain PMU */
     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
@@ -110,13 +104,8 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
         mdcr_set |= MDCR_EL2_TTRF;
 
-    /* Trap Trace */
-    if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
-        cptr_set |= CPTR_EL2_TTA;
-
     vcpu->arch.mdcr_el2 |= mdcr_set;
     vcpu->arch.mdcr_el2 &= ~mdcr_clear;
-    vcpu->arch.cptr_el2 |= cptr_set;
 }
 
 /*
@@ -167,8 +156,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
     /* Clear res0 and set res1 bits to trap potential new features. */
     vcpu->arch.hcr_el2 &= ~(HCR_RES0);
     vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
-    vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
-    vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
 }
 
 /*

View File

@ -36,23 +36,54 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
static void __activate_traps(struct kvm_vcpu *vcpu) static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{ {
u64 val; u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
___activate_traps(vcpu); if (!guest_owns_fp_regs(vcpu))
__activate_traps_common(vcpu);
val = vcpu->arch.cptr_el2;
val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
if (!guest_owns_fp_regs(vcpu)) {
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu); __activate_traps_fpsimd32(vcpu);
}
if (cpus_have_final_cap(ARM64_SME)) /* !hVHE case upstream */
if (1) {
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
/*
* Always trap SME since it's not supported in KVM.
* TSM is RES1 if SME isn't implemented.
*/
val |= CPTR_EL2_TSM; val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2); if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs(vcpu))
val |= CPTR_EL2_TZ;
if (!guest_owns_fp_regs(vcpu))
val |= CPTR_EL2_TFP;
write_sysreg(val, cptr_el2);
}
}
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
/* !hVHE case upstream */
if (1) {
u64 val = CPTR_NVHE_EL2_RES1;
if (!cpus_have_final_cap(ARM64_SVE))
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
}
}
static void __activate_traps(struct kvm_vcpu *vcpu)
{
___activate_traps(vcpu);
__activate_traps_common(vcpu);
__activate_cptr_traps(vcpu);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
@ -73,7 +104,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
static void __deactivate_traps(struct kvm_vcpu *vcpu) static void __deactivate_traps(struct kvm_vcpu *vcpu)
{ {
extern char __kvm_hyp_host_vector[]; extern char __kvm_hyp_host_vector[];
u64 cptr;
___deactivate_traps(vcpu); ___deactivate_traps(vcpu);
@ -98,13 +128,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
cptr = CPTR_EL2_DEFAULT; __deactivate_cptr_traps(vcpu);
if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
cptr |= CPTR_EL2_TZ;
if (cpus_have_final_cap(ARM64_SME))
cptr &= ~CPTR_EL2_TSM;
write_sysreg(cptr, cptr_el2);
write_sysreg(__kvm_hyp_host_vector, vbar_el2); write_sysreg(__kvm_hyp_host_vector, vbar_el2);
} }
@ -209,21 +233,22 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers; return hyp_exit_handlers;
} }
/* static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
* Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
* The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
* guest from dropping to AArch32 EL0 if implemented by the CPU. If the
* hypervisor spots a guest in such a state ensure it is handled, and don't
* trust the host to spot or fix it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*
* Returns false if the guest ran in AArch32 when it shouldn't have, and
* thus should exit to the host, or true if a the guest run loop can continue.
*/
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{ {
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
struct kvm *kvm = kern_hyp_va(vcpu->kvm); struct kvm *kvm = kern_hyp_va(vcpu->kvm);
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Some guests (e.g., protected VMs) are not be allowed to run in
* AArch32. The ARMv8 architecture does not give the hypervisor a
* mechanism to prevent a guest from dropping to AArch32 EL0 if
* implemented by the CPU. If the hypervisor spots a guest in such a
* state ensure it is handled, and don't trust the host to spot or fix
* it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*/
if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
/*
* As we have caught the guest red-handed, decide that it isn't
@@ -236,6 +261,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
}
return __fixup_guest_exit(vcpu, exit_code, handlers);
}
/* Switch to the guest for legacy non-VHE systems */


@@ -114,13 +114,11 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
return hyp_exit_handlers;
synchronize_vcpu_pstate(vcpu, exit_code);
}
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
{
}
/* Switch to the guest for VHE systems running in EL2 */
@@ -136,6 +134,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_save_host_state_vhe(host_ctxt);
fpsimd_lazy_switch_to_guest(vcpu);
/*
* ARM erratum 1165522 requires us to configure both stage 1 and
* stage 2 translation for the guest context before we clear
@@ -166,6 +166,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__deactivate_traps(vcpu);
fpsimd_lazy_switch_to_host(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)


@@ -42,11 +42,14 @@ static u32 kvm_ipa_limit;
PSR_AA32_I_BIT | PSR_AA32_F_BIT)
unsigned int kvm_sve_max_vl;
unsigned int kvm_host_sve_max_vl;
int kvm_arm_init_sve(void)
{
if (system_supports_sve()) {
kvm_sve_max_vl = sve_max_virtualisable_vl();
kvm_host_sve_max_vl = sve_max_vl();
kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
/*
* The get_sve_reg()/set_sve_reg() ioctl interface will need


@@ -1578,7 +1578,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size);
else {
max_pfn = PFN_UP(start + size);
/* Address of hotplugged memory can be smaller */
max_pfn = max(max_pfn, PFN_UP(start + size));
max_low_pfn = max_pfn;
}


@@ -173,18 +173,6 @@ static __init int setup_node(int pxm)
return acpi_map_pxm_to_node(pxm);
}
/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
* I/O localities since SRAT does not list them. I/O localities are
* not supported at this point.
*/
unsigned int numa_distance_cnt;
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
return slit->locality_count;
}
void __init numa_set_distance(int from, int to, int distance)
{
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {


@@ -142,8 +142,6 @@ static void build_prologue(struct jit_ctx *ctx)
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
else
emit_insn(ctx, nop);
ctx->stack_size = stack_adjust;
}


@@ -25,11 +25,6 @@ struct jit_data {
struct jit_ctx ctx;
};
static inline void emit_nop(union loongarch_instruction *insn)
{
insn->word = INSN_NOP;
}
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \


@@ -42,7 +42,7 @@ int (*__pmax_close)(int);
* Detect which PROM the DECSTATION has, and set the callback vectors
* appropriately.
*/
void __init which_prom(s32 magic, s32 *prom_vec)
static void __init which_prom(s32 magic, s32 *prom_vec)
{
/*
* No sign of the REX PROM's magic number means we assume a non-REX


@@ -8,7 +8,7 @@
#define __ASM_DS1287_H
extern int ds1287_timer_state(void);
extern void ds1287_set_base_clock(unsigned int clock);
extern int ds1287_set_base_clock(unsigned int hz);
extern int ds1287_clockevent_init(int irq);
#endif


@@ -10,6 +10,7 @@
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <asm/ds1287.h>
#include <asm/time.h>
int ds1287_timer_state(void)


@@ -25,6 +25,7 @@
#include <linux/reboot.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/nospec.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -1178,6 +1179,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
|| nargs + nret > ARRAY_SIZE(args.args))
return -EINVAL;
nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);
/* Copy in args. */
if (copy_from_user(args.args, uargs->args,
nargs * sizeof(rtas_arg_t)) != 0)


@@ -19,16 +19,9 @@
#ifndef __ASSEMBLY__
void arch_kgdb_breakpoint(void);
extern unsigned long kgdb_compiled_break;
static inline void arch_kgdb_breakpoint(void)
{
asm(".global kgdb_compiled_break\n"
".option norvc\n"
"kgdb_compiled_break: ebreak\n"
".option rvc\n");
}
#endif /* !__ASSEMBLY__ */
#define DBG_REG_ZERO "zero"


@@ -61,8 +61,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
args[0] = regs->orig_a0;
args++;
args[1] = regs->a1;
memcpy(args, &regs->a1, 5 * sizeof(args[0]));
args[2] = regs->a2;
args[3] = regs->a3;
args[4] = regs->a4;
args[5] = regs->a5;
}
static inline int syscall_get_arch(struct task_struct *task)


@@ -273,6 +273,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->epc = pc;
}
noinline void arch_kgdb_breakpoint(void)
{
asm(".global kgdb_compiled_break\n"
"kgdb_compiled_break: ebreak\n");
}
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
char *remcom_out_buffer)
{


@@ -76,6 +76,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif
static int num_standard_resources;
static struct resource *standard_resources;
static int __init add_resource(struct resource *parent,
struct resource *res)
{
@@ -149,7 +152,7 @@ static void __init init_resources(void)
struct resource *res = NULL;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
int num_resources = 0, res_idx = 0;
int num_resources = 0, res_idx = 0, non_resv_res = 0;
int ret = 0;
/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
@@ -213,6 +216,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
res = &mem_res[res_idx--];
non_resv_res++;
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -230,6 +234,9 @@ static void __init init_resources(void)
goto error;
}
num_standard_resources = non_resv_res;
standard_resources = &mem_res[res_idx + 1];
/* Clean-up any unused pre-allocated resources */
if (res_idx >= 0)
memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
@@ -241,6 +248,33 @@ static void __init init_resources(void)
memblock_free(mem_res, mem_res_sz);
}
} }
static int __init reserve_memblock_reserved_regions(void)
{
u64 i, j;
for (i = 0; i < num_standard_resources; i++) {
struct resource *mem = &standard_resources[i];
phys_addr_t r_start, r_end, mem_size = resource_size(mem);
if (!memblock_is_region_reserved(mem->start, mem_size))
continue;
for_each_reserved_mem_range(j, &r_start, &r_end) {
resource_size_t start, end;
start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
if (start > mem->end || end < mem->start)
continue;
reserve_region_with_split(mem, start, end, "Reserved");
}
}
return 0;
}
arch_initcall(reserve_memblock_reserved_regions);
static void __init parse_dtb(void)
{


@@ -52,8 +52,10 @@ out:
void arch_enter_lazy_mmu_mode(void)
{
struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
struct tlb_batch *tb;
preempt_disable();
tb = this_cpu_ptr(&tlb_batch);
tb->active = 1;
}
@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
if (tb->tlb_nr)
flush_tlb_pending();
tb->active = 0;
preempt_enable();
}
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,


@@ -1149,8 +1149,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
* + precise_ip < 2 for the non event IP
* + For RTM TSX weight we need GPRs for the abort code.
*/
gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS);
gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS)) ||
((sample_type & PERF_SAMPLE_REGS_USER) &&
(attr->sample_regs_user & PEBS_GP_REGS));
tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
((attr->config & INTEL_ARCH_EVENT_MASK) ==
@@ -1792,7 +1794,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
regs->flags &= ~PERF_EFLAGS_EXACT;
}
if (sample_type & PERF_SAMPLE_REGS_INTR)
if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
adaptive_pebs_save_regs(regs, gprs);
}


@@ -4656,28 +4656,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
@@ -5250,37 +5250,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
/* Free-Running IIO CLOCKS Counter */
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
static struct intel_uncore_type icx_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 9,
@@ -5288,7 +5257,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
.freerunning = icx_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
.event_descs = icx_uncore_iio_freerunning_events,
.event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
@@ -5857,69 +5826,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
[SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
};
static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
/* Free-Running IIO CLOCKS Counter */
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
/* Free-Running IIO BANDWIDTH OUT Counters */
INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
static struct intel_uncore_type spr_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 17,
.num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
.freerunning = spr_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
.event_descs = spr_uncore_iio_freerunning_events,
.event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};


@@ -787,7 +787,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
* (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
if (!rdmsrl_amd_safe(0xc001100d, &value)) {
value &= ~BIT_64(32);


@@ -1204,7 +1204,13 @@ static void __split_lock_reenable(struct work_struct *work)
{
sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
/*
* In order for each CPU to schedule its delayed work independently of the
* others, delayed work struct must be per-CPU. This is not required when
* sysctl_sld_mitigate is enabled because of the semaphore that limits
* the number of simultaneously scheduled delayed works to 1.
*/
static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
/*
* If a CPU goes offline with pending delayed work to re-enable split lock
@@ -1225,7 +1231,7 @@ static int splitlock_cpu_offline(unsigned int cpu)
static void split_lock_warn(unsigned long ip)
{
struct delayed_work *work;
struct delayed_work *work = NULL;
int cpu;
if (!current->reported_split_lock)
@@ -1247,11 +1253,17 @@ static void split_lock_warn(unsigned long ip)
if (down_interruptible(&buslock_sem) == -EINTR)
return;
work = &sl_reenable_unlock;
} else {
work = &sl_reenable;
}
cpu = get_cpu();
if (!work) {
work = this_cpu_ptr(&sl_reenable);
/* Deferred initialization of per-CPU struct */
if (!work->work.func)
INIT_DELAYED_WORK(work, __split_lock_reenable);
}
schedule_delayed_work_on(cpu, work, 2);
/* Disable split lock detection on this CPU to make progress */


@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
unsigned long pfn = 0;
u64 last_addr = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];
if (pfn < PFN_UP(entry->addr))
register_nosave_region(pfn, PFN_UP(entry->addr));
pfn = PFN_DOWN(entry->addr + entry->size);
if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
register_nosave_region(PFN_UP(entry->addr), pfn);
continue;
if (pfn >= limit_pfn)
if (last_addr < entry->addr)
break;
register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
last_addr = entry->addr + entry->size;
}
register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
#ifdef CONFIG_ACPI


@@ -11460,6 +11460,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
@@ -11473,6 +11475,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state = vcpu->arch.mp_state;
out:
kvm_vcpu_srcu_read_unlock(vcpu);
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);


@@ -100,7 +100,12 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
xor %edx, %edx
wrmsr
call xen_prepare_pvh
/* Call xen_prepare_pvh() via the kernel virtual mapping */
leaq xen_prepare_pvh(%rip), %rax
subq phys_base(%rip), %rax
addq $__START_KERNEL_map, %rax
ANNOTATE_RETPOLINE_SAFE
call *%rax
/* startup_64 expects boot_params in %rsi. */
mov $_pa(pvh_bootparams), %rsi


@@ -255,6 +255,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
blkg->pd[i] = pd;
pd->blkg = blkg;
pd->plid = i;
pd->online = false;
}
return blkg;
@@ -326,8 +327,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && pol->pd_online_fn)
if (blkg->pd[i]) {
pol->pd_online_fn(blkg->pd[i]);
if (pol->pd_online_fn)
pol->pd_online_fn(blkg->pd[i]);
blkg->pd[i]->online = true;
}
}
}
blkg->online = true;
@@ -432,8 +436,11 @@ static void blkg_destroy(struct blkcg_gq *blkg)
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (blkg->pd[i] && pol->pd_offline_fn)
if (blkg->pd[i] && blkg->pd[i]->online) {
pol->pd_offline_fn(blkg->pd[i]);
if (pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[i]);
blkg->pd[i]->online = false;
}
}
blkg->online = false;
@@ -1422,6 +1429,7 @@ retry:
blkg->pd[pol->plid] = pd;
pd->blkg = blkg;
pd->plid = pol->plid;
pd->online = false;
}
/* all allocated, init in the same order */
@@ -1429,9 +1437,11 @@ retry:
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
pol->pd_init_fn(blkg->pd[pol->plid]);
if (pol->pd_online_fn)
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
if (pol->pd_online_fn)
pol->pd_online_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid]->online = true;
}
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
@@ -1493,7 +1503,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
spin_lock(&blkcg->lock);
if (blkg->pd[pol->plid]) {
if (pol->pd_offline_fn)
if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
pol->pd_offline_fn(blkg->pd[pol->plid]);
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;


@@ -125,6 +125,7 @@ struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
bool online;
};
/*


@@ -1430,8 +1430,11 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
lockdep_assert_held(&iocg->ioc->lock);
lockdep_assert_held(&iocg->waitq.lock);
/* make sure that nobody messed with @iocg */
WARN_ON_ONCE(list_empty(&iocg->active_list));
/*
* make sure that nobody messed with @iocg. Check iocg->pd.online
* to avoid warn when removing blkcg or disk.
*/
WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online);
WARN_ON_ONCE(iocg->inuse > 1);
iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);


@@ -84,5 +84,5 @@ targets += x509_revocation_list
hostprogs := extract-cert
HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null)
HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) -I$(srctree)/scripts
HOSTLDLIBS_extract-cert = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)


@@ -21,14 +21,17 @@
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/err.h>
#include <openssl/engine.h>
/*
* OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
*
* Remove this if/when that API is no longer used
*/
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#if OPENSSL_VERSION_MAJOR >= 3
# define USE_PKCS11_PROVIDER
# include <openssl/provider.h>
# include <openssl/store.h>
#else
# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0)
# define USE_PKCS11_ENGINE
# include <openssl/engine.h>
# endif
#endif
#include "ssl-common.h"
#define PKEY_ID_PKCS7 2
@@ -40,41 +43,6 @@ void format(void)
exit(2);
}
static void display_openssl_errors(int l)
{
const char *file;
char buf[120];
int e, line;
if (ERR_peek_error() == 0)
return;
fprintf(stderr, "At main.c:%d:\n", l);
while ((e = ERR_get_error_line(&file, &line))) {
ERR_error_string(e, buf);
fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
}
}
static void drain_openssl_errors(void)
{
const char *file;
int line;
if (ERR_peek_error() == 0)
return;
while (ERR_get_error_line(&file, &line)) {}
}
#define ERR(cond, fmt, ...) \
do { \
bool __cond = (cond); \
display_openssl_errors(__LINE__); \
if (__cond) { \
err(1, fmt, ## __VA_ARGS__); \
} \
} while(0)
static const char *key_pass;
static BIO *wb;
static char *cert_dst;
@@ -94,6 +62,66 @@ static void write_cert(X509 *x509)
fprintf(stderr, "Extracted cert: %s\n", buf);
}
static X509 *load_cert_pkcs11(const char *cert_src)
{
X509 *cert = NULL;
#ifdef USE_PKCS11_PROVIDER
OSSL_STORE_CTX *store;
if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true))
ERR(1, "OSSL_PROVIDER_try_load(pkcs11)");
if (!OSSL_PROVIDER_try_load(NULL, "default", true))
ERR(1, "OSSL_PROVIDER_try_load(default)");
store = OSSL_STORE_open(cert_src, NULL, NULL, NULL, NULL);
ERR(!store, "OSSL_STORE_open");
while (!OSSL_STORE_eof(store)) {
OSSL_STORE_INFO *info = OSSL_STORE_load(store);
if (!info) {
drain_openssl_errors(__LINE__, 0);
continue;
}
if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_CERT) {
cert = OSSL_STORE_INFO_get1_CERT(info);
ERR(!cert, "OSSL_STORE_INFO_get1_CERT");
}
OSSL_STORE_INFO_free(info);
if (cert)
break;
}
OSSL_STORE_close(store);
#elif defined(USE_PKCS11_ENGINE)
ENGINE *e;
struct {
const char *cert_id;
X509 *cert;
} parms;
parms.cert_id = cert_src;
parms.cert = NULL;
ENGINE_load_builtin_engines();
drain_openssl_errors(__LINE__, 1);
e = ENGINE_by_id("pkcs11");
ERR(!e, "Load PKCS#11 ENGINE");
if (ENGINE_init(e))
drain_openssl_errors(__LINE__, 1);
else
ERR(1, "ENGINE_init");
if (key_pass)
ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
ERR(!parms.cert, "Get X.509 from PKCS#11");
cert = parms.cert;
#else
fprintf(stderr, "no pkcs11 engine/provider available\n");
exit(1);
#endif
return cert;
}
int main(int argc, char **argv)
{
char *cert_src;
@@ -119,28 +147,10 @@ int main(int argc, char **argv)
fclose(f);
exit(0);
} else if (!strncmp(cert_src, "pkcs11:", 7)) {
ENGINE *e;
X509 *cert = load_cert_pkcs11(cert_src);
struct {
const char *cert_id;
X509 *cert;
} parms;
parms.cert_id = cert_src;
ERR(!cert, "load_cert_pkcs11 failed");
parms.cert = NULL;
write_cert(cert);
ENGINE_load_builtin_engines();
drain_openssl_errors();
e = ENGINE_by_id("pkcs11");
ERR(!e, "Load PKCS#11 ENGINE");
if (ENGINE_init(e))
drain_openssl_errors();
else
ERR(1, "ENGINE_init");
if (key_pass)
ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
ERR(!parms.cert, "Get X.509 from PKCS#11");
write_cert(parms.cert);
} else {
BIO *b;
X509 *x509;


@@ -22,8 +22,8 @@ static const char * const profile_names[] = {
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
static ssize_t platform_profile_choices_show(struct device *dev,
static ssize_t platform_profile_choices_show(struct kobject *kobj,
struct device_attribute *attr,
struct kobj_attribute *attr,
char *buf)
{
int len = 0;
@@ -49,8 +49,8 @@ static ssize_t platform_profile_choices_show(struct device *dev,
return len;
}
static ssize_t platform_profile_show(struct device *dev,
static ssize_t platform_profile_show(struct kobject *kobj,
struct device_attribute *attr,
struct kobj_attribute *attr,
char *buf)
{
enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
@@ -77,8 +77,8 @@ static ssize_t platform_profile_show(struct device *dev,
return sysfs_emit(buf, "%s\n", profile_names[profile]);
}
static ssize_t platform_profile_store(struct device *dev,
static ssize_t platform_profile_store(struct kobject *kobj,
struct device_attribute *attr,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int err, i;
@@ -115,12 +115,12 @@ static ssize_t platform_profile_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RO(platform_profile_choices);
static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
static DEVICE_ATTR_RW(platform_profile);
static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
&dev_attr_platform_profile_choices.attr,
&attr_platform_profile_choices.attr,
&dev_attr_platform_profile.attr,
&attr_platform_profile.attr,
NULL
};


@@ -592,6 +592,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),


@@ -1510,8 +1510,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
/* is it pointless to prefer PIO for "safety reasons"? */
if (ap->flags & ATA_FLAG_PIO_DMA) {
/*
* Do not use DMA if the connected device only supports PIO, even if the
* port prefers PIO commands via DMA.
*
* Ideally, we should call atapi_check_dma() to check if it is safe for
* the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
* Since we can't check the command, perhaps we should only use pio?
*/
if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
tf.protocol = ATAPI_PROT_DMA;
tf.feature |= ATAPI_PKT_DMA;
} else {


@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
if (!ap->ioaddr.cmd_addr)
return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
if (!ap->ioaddr.ctl_addr)
return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
if (!ap->ioaddr.bmdma_addr)
return -ENOMEM;
/*
* Adjust register offsets


@@ -1118,9 +1118,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
pdc_i2c_read_data[i].reg,
&spd0[pdc_i2c_read_data[i].ofs]);
&spd0[pdc_i2c_read_data[i].ofs])) {
dev_err(host->dev,
"Failed in i2c read at index %d: device=%#x, reg=%#x\n",
i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
return -EIO;
}
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1285,6 +1290,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
if (size < 0)
return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */


@@ -684,6 +684,13 @@ int devres_release_group(struct device *dev, void *id)
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
} else if (list_empty(&dev->devres_head)) {
/*
* dev is probably dying via devres_release_all(): groups
* have already been removed and are on the process of
* being released - don't touch and don't warn.
*/
spin_unlock_irqrestore(&dev->devres_lock, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);


@@ -624,19 +624,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
* dependency.
*/
fput(old_file);
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
if (partscan)
loop_reread_partitions(lo);
error = 0;
done:
/* enable and uncork uevent now that we are done */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
return error;
out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
goto done;
}
@@ -1122,8 +1123,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
loop_global_unlock(lo, is_loop);
if (partscan)


@@ -807,6 +807,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
const char *firmware_name)
{
struct qca_fw_config config = {};
const char *variant = "";
int err;
u8 rom_ver = 0;
u32 soc_ver;
@@ -901,13 +902,11 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02xu.bin", rom_ver);
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
}
if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID)
variant = "u";
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x%s.bin", rom_ver, variant);
break;
case QCA_WCN3988:
snprintf(config.fwname, sizeof(config.fwname),


@@ -817,6 +817,8 @@ out_free:
rtl_dev_err(hdev, "mandatory config file %s not found",
btrtl_dev->ic_info->cfg_name);
ret = btrtl_dev->cfg_len;
if (!ret)
ret = -EINVAL;
goto err_free;
}
}


@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
if (!skb) {
percpu_down_read(&hu->proto_lock);
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
test_bit(HCI_UART_PROTO_INIT, &hu->flags))
skb = hu->proto->dequeue(hu);
percpu_up_read(&hu->proto_lock);
@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!percpu_down_read_trylock(&hu->proto_lock))
return 0;
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
!test_bit(HCI_UART_PROTO_INIT, &hu->flags))
goto no_schedule;
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
percpu_down_read(&hu->proto_lock);
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
!test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return -EUNATCH;
}
@@ -582,7 +585,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
if (tty != hu->tty)
return;
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
test_bit(HCI_UART_PROTO_INIT, &hu->flags))
hci_uart_tx_wakeup(hu);
}
@@ -608,7 +612,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
percpu_down_read(&hu->proto_lock);
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
!test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return;
}
@@ -709,12 +714,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
set_bit(HCI_UART_PROTO_INIT, &hu->flags);
err = hci_uart_register_dev(hu);
if (err) {
return err;
}
set_bit(HCI_UART_PROTO_READY, &hu->flags);
clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
return 0;
}


@@ -90,6 +90,7 @@ struct hci_uart {
#define HCI_UART_REGISTERED 1
#define HCI_UART_PROTO_READY 2
#define HCI_UART_NO_SUSPEND_NOTIFIER 3
#define HCI_UART_PROTO_INIT 4
/* TX states */
#define HCI_UART_SENDING 1


@@ -1201,11 +1201,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_ring_element *mhi_tre;
struct mhi_buf_info *buf_info;
int eot, eob, chain, bei;
int ret;
int ret = 0;
/* Protect accesses for reading and incrementing WP */
write_lock_bh(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
ret = -ENODEV;
goto out;
}
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -1223,10 +1228,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
if (ret) {
if (ret)
write_unlock_bh(&mhi_chan->lock);
goto out;
return ret;
}
}
eob = !!(flags & MHI_EOB);
@@ -1243,9 +1246,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
out:
write_unlock_bh(&mhi_chan->lock);
return 0;
return ret;
}
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,


@@ -104,11 +104,10 @@ again:
return 0;
/* process status changes without irq support */
do {
usleep_range(priv->timeout_min, priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
usleep_range(priv->timeout_min,
priv->timeout_max);
} while (time_before(jiffies, stop));
return -ETIME;
}
@@ -433,7 +432,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
rc = -ETIME;
if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
rc = -EAGAIN;
else
rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -450,7 +452,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
rc = -ETIME;
if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
rc = -EAGAIN;
else
rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -505,9 +510,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc >= 0)
/* Data transfer done successfully */
break;
else if (rc != -EIO)
else if (rc != -EAGAIN && rc != -EIO)
/* Data transfer failed, not recoverable */
return rc;
usleep_range(priv->timeout_min, priv->timeout_max);
}
rc = tpm_tis_verify_crc(priv, len, buf);
@@ -1044,6 +1051,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
}
if (priv->manufacturer_id == TPM_VID_IFX)
set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags);
if (is_bsw()) {
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
ILB_REMAP_SIZE);


@@ -88,6 +88,7 @@ enum tpm_tis_flags {
TPM_TIS_INVALID_STATUS = 1,
TPM_TIS_DEFAULT_CANCELLATION = 2,
TPM_TIS_IRQ_TESTED = 3,
TPM_TIS_STATUS_VALID_RETRY = 4,
};
struct tpm_tis_data {


@@ -290,6 +290,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
*/
udelay(1);
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
@@ -306,9 +309,6 @@ static int gdsc_enable(struct generic_pm_domain *domain)
udelay(1);
}
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
return 0;
}
@@ -418,13 +418,6 @@ static int gdsc_init(struct gdsc *sc)
goto err_disable_supply;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
goto err_disable_supply;
}
/*
* Make sure the retain bit is set if the GDSC is already on,
* otherwise we end up turning off the GDSC and destroying all
@@ -432,6 +425,14 @@ static int gdsc_init(struct gdsc *sc)
*/
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
goto err_disable_supply;
}
} else if (sc->flags & ALWAYS_ON) {
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
gdsc_enable(&sc->pd);
@@ -463,6 +464,23 @@ err_disable_supply:
return ret;
}
static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num)
{
struct device *dev = desc->dev;
struct gdsc **scs = desc->scs;
int i;
/* Remove subdomains */
for (i = num - 1; i >= 0; i--) {
if (!scs[i])
continue;
if (scs[i]->parent)
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
}
int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev, struct regmap *regmap)
{
@@ -507,30 +525,27 @@ int gdsc_register(struct gdsc_desc *desc,
if (!scs[i])
continue;
if (scs[i]->parent)
pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
if (ret)
goto err_pm_subdomain_remove;
}
return of_genpd_add_provider_onecell(dev->of_node, data);
err_pm_subdomain_remove:
gdsc_pm_subdomain_remove(desc, i);
return ret;
}
void gdsc_unregister(struct gdsc_desc *desc)
{
int i;
struct device *dev = desc->dev;
struct gdsc **scs = desc->scs;
size_t num = desc->num;
/* Remove subdomains */
gdsc_pm_subdomain_remove(desc, num);
for (i = 0; i < num; i++) {
if (!scs[i])
continue;
if (scs[i]->parent)
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
of_genpd_del_provider(dev->of_node);
}


@@ -168,9 +168,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
}
if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
ret = device_init_wakeup(&pdev->dev, true);
device_set_wakeup_capable(&pdev->dev, true);
if (ret)
goto out_clk_disable;
ret = dev_pm_set_wake_irq(&pdev->dev, irq);
if (ret)


@@ -2673,10 +2673,18 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
if (cpufreq_driver->update_limits)
cpufreq_driver->update_limits(cpu);
else
cpufreq_update_policy(cpu);
cpufreq_cpu_put(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);


@@ -115,12 +115,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
qm_fd_addr_set64(&fd, addr);
do {
refcount_inc(&req->drv_ctx->refcnt);
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
if (likely(!ret)) {
if (likely(!ret))
refcount_inc(&req->drv_ctx->refcnt);
return 0;
}
refcount_dec(&req->drv_ctx->refcnt);
if (ret != -EBUSY)
break;
num_retries++;


@ -179,14 +179,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
pdev_new = to_pci_dev(dev_new); pdev_new = to_pci_dev(dev_new);
pdev_cur = to_pci_dev(dev_cur); pdev_cur = to_pci_dev(dev_cur);
if (pdev_new->bus->number < pdev_cur->bus->number) if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
return true; return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);
if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn)) if (pdev_new->bus->number != pdev_cur->bus->number)
return true; return pdev_new->bus->number < pdev_cur->bus->number;
if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn)) if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
return true; return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
return false; return false;
} }
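
The rewritten sp_pci_is_master() is a lexicographic comparison: each key is compared only when all higher-priority keys are equal, whereas the old code returned true as soon as any lower-priority key compared smaller. A standalone sketch of the same ordering over (domain, bus, slot, function); the struct and helper names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct pci_addr {
        int domain, bus, slot, func;
};

/* True when a sorts before b: compare keys in priority order and only
 * fall through to the next key when the current keys are equal. */
static bool addr_before(const struct pci_addr *a, const struct pci_addr *b)
{
        if (a->domain != b->domain)
                return a->domain < b->domain;
        if (a->bus != b->bus)
                return a->bus < b->bus;
        if (a->slot != b->slot)
                return a->slot < b->slot;
        if (a->func != b->func)
                return a->func < b->func;
        return false;           /* identical addresses: not strictly before */
}

int main(void)
{
        struct pci_addr a = { 0, 3, 0, 0 }, b = { 1, 0, 0, 0 };

        printf("%d\n", addr_before(&a, &b));    /* 1: domain 0 sorts before domain 1 */
        return 0;
}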


@ -753,6 +753,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *irq; struct gpio_irq_chip *irq;
struct tegra_gpio *gpio; struct tegra_gpio *gpio;
struct device_node *np; struct device_node *np;
struct resource *res;
char **names; char **names;
int err; int err;
@ -772,19 +773,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->num_banks++; gpio->num_banks++;
/* get register apertures */ /* get register apertures */
gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security"); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security");
if (IS_ERR(gpio->secure)) { if (!res)
gpio->secure = devm_platform_ioremap_resource(pdev, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (IS_ERR(gpio->secure)) gpio->secure = devm_ioremap_resource(&pdev->dev, res);
return PTR_ERR(gpio->secure); if (IS_ERR(gpio->secure))
} return PTR_ERR(gpio->secure);
gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio"); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
if (IS_ERR(gpio->base)) { if (!res)
gpio->base = devm_platform_ioremap_resource(pdev, 1); res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (IS_ERR(gpio->base)) gpio->base = devm_ioremap_resource(&pdev->dev, res);
return PTR_ERR(gpio->base); if (IS_ERR(gpio->base))
} return PTR_ERR(gpio->base);
err = platform_irq_count(pdev); err = platform_irq_count(pdev);
if (err < 0) if (err < 0)


@ -1012,6 +1012,7 @@ static int zynq_gpio_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) if (ret < 0)
dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n"); dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
device_init_wakeup(&pdev->dev, 0);
gpiochip_remove(&gpio->chip); gpiochip_remove(&gpio->chip);
clk_disable_unprepare(gpio->clk); clk_disable_unprepare(gpio->clk);
device_set_wakeup_capable(&pdev->dev, 0); device_set_wakeup_capable(&pdev->dev, 0);


@ -6186,6 +6186,7 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
{ {
struct dma_fence *old = NULL; struct dma_fence *old = NULL;
dma_fence_get(gang);
do { do {
dma_fence_put(old); dma_fence_put(old);
rcu_read_lock(); rcu_read_lock();
@ -6195,12 +6196,19 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
if (old == gang) if (old == gang)
break; break;
if (!dma_fence_is_signaled(old)) if (!dma_fence_is_signaled(old)) {
dma_fence_put(gang);
return old; return old;
}
} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit, } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
old, gang) != old); old, gang) != old);
/*
* Drop it once for the exchanged reference in adev and once for the
* thread local reference acquired in amdgpu_device_get_gang().
*/
dma_fence_put(old);
dma_fence_put(old); dma_fence_put(old);
return NULL; return NULL;
} }


@ -210,7 +210,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt, struct sg_table *sgt,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (sgt->sgl->page_link) { if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0); dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt); sg_free_table(sgt);
kfree(sgt); kfree(sgt);


@ -1662,7 +1662,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
}; };
static const struct pci_device_id pciidlist[] = { static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@ -1735,8 +1734,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */ /* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
@ -1819,7 +1816,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
/* topaz */ /* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@ -2099,14 +2095,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENOTSUPP; return -ENOTSUPP;
} }
switch (flags & AMD_ASIC_MASK) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
#ifdef CONFIG_DRM_AMDGPU_SI #ifdef CONFIG_DRM_AMDGPU_SI
if (!amdgpu_si_support) { if (!amdgpu_si_support) {
switch (flags & AMD_ASIC_MASK) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
dev_info(&pdev->dev, dev_info(&pdev->dev,
"SI support provided by radeon.\n"); "SI support provided by radeon.\n");
dev_info(&pdev->dev, dev_info(&pdev->dev,
@ -2114,16 +2110,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
); );
return -ENODEV; return -ENODEV;
} }
} break;
#else
dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
return -ENODEV;
#endif #endif
case CHIP_KAVERI:
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KABINI:
case CHIP_MULLINS:
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
if (!amdgpu_cik_support) { if (!amdgpu_cik_support) {
switch (flags & AMD_ASIC_MASK) {
case CHIP_KAVERI:
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KABINI:
case CHIP_MULLINS:
dev_info(&pdev->dev, dev_info(&pdev->dev,
"CIK support provided by radeon.\n"); "CIK support provided by radeon.\n");
dev_info(&pdev->dev, dev_info(&pdev->dev,
@ -2131,8 +2129,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
); );
return -ENODEV; return -ENODEV;
} }
} break;
#else
dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
return -ENODEV;
#endif #endif
default:
break;
}
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev)) if (IS_ERR(adev))


@ -208,6 +208,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EINVAL; return -EINVAL;
} }
if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
}
if (!access_ok((const void __user *) args->read_pointer_address, if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) { sizeof(uint32_t))) {
pr_err("Can't access read pointer\n"); pr_err("Can't access read pointer\n");
@ -464,6 +469,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return -EINVAL; return -EINVAL;
} }
if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
}
properties.queue_address = args->ring_base_address; properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size; properties.queue_size = args->ring_size;
properties.queue_percent = args->queue_percentage; properties.queue_percent = args->queue_percentage;


@ -36,6 +36,7 @@
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd.h"
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_reset.h"
struct mm_struct; struct mm_struct;
@ -1114,6 +1115,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p)
p->kobj = NULL; p->kobj = NULL;
} }
/*
* If any GPU is ongoing reset, wait for reset complete.
*/
static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++)
flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
}
/* No process locking is needed in this function, because the process /* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is * is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process * using it any more, otherwise we couldn't safely free the process
@ -1127,6 +1139,11 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_process_dequeue_from_all_devices(p); kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm); pqm_uninit(&p->pqm);
/*
* If GPU in reset, user queues may still running, wait for reset complete.
*/
kfd_process_wait_gpu_reset_complete(p);
/* Signal the eviction fence after user mode queues are /* Signal the eviction fence after user mode queues are
* destroyed. This allows any BOs to be freed without * destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences. * triggering pointless evictions or waiting for fences.


@ -429,7 +429,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n", pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid, pqm->process->pasid,
pqn->q->properties.queue_id, retval); pqn->q->properties.queue_id, retval);
if (retval != -ETIME) if (retval != -ETIME && retval != -EIO)
goto err_destroy_queue; goto err_destroy_queue;
} }


@ -4499,17 +4499,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} }
} }
if (link_cnt > (MAX_PIPES * 2)) {
DRM_ERROR(
"KMS: Cannot support more than %d display indexes\n",
MAX_PIPES * 2);
goto fail;
}
/* loops over all connectors on the board */ /* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) { for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL; struct dc_link *link = NULL;
if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
DRM_ERROR(
"KMS: Cannot support more than %d display indexes\n",
AMDGPU_DM_MAX_DISPLAY_INDEX);
continue;
}
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector) if (!aconnector)
goto fail; goto fail;


@ -1947,20 +1947,11 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
dc->hwss.get_position(&pipe_ctx, 1, &position); dc->hwss.get_position(&pipe_ctx, 1, &position);
vpos = position.vertical_count; vpos = position.vertical_count;
/* Avoid wraparound calculation issues */
vupdate_start += stream->timing.v_total;
vupdate_end += stream->timing.v_total;
vpos += stream->timing.v_total;
if (vpos <= vupdate_start) { if (vpos <= vupdate_start) {
/* VPOS is in VACTIVE or back porch. */ /* VPOS is in VACTIVE or back porch. */
lines_to_vupdate = vupdate_start - vpos; lines_to_vupdate = vupdate_start - vpos;
} else if (vpos > vupdate_end) {
/* VPOS is in the front porch. */
return;
} else { } else {
/* VPOS is in VUPDATE. */ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
lines_to_vupdate = 0;
} }
/* Calculate time until VUPDATE in microseconds. */ /* Calculate time until VUPDATE in microseconds. */
@ -1968,13 +1959,18 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz; stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
us_to_vupdate = lines_to_vupdate * us_per_line; us_to_vupdate = lines_to_vupdate * us_per_line;
/* Stall out until the cursor update completes. */
if (vupdate_end < vupdate_start)
vupdate_end += stream->timing.v_total;
/* Position is in the range of vupdate start and end*/
if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
us_to_vupdate = 0;
/* 70 us is a conservative estimate of cursor update time*/ /* 70 us is a conservative estimate of cursor update time*/
if (us_to_vupdate > 70) if (us_to_vupdate > 70)
return; return;
/* Stall out until the cursor update completes. */
if (vupdate_end < vupdate_start)
vupdate_end += stream->timing.v_total;
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
udelay(us_to_vupdate + us_vupdate); udelay(us_to_vupdate + us_vupdate);
} }


@ -44,7 +44,7 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable); REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable);
REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable); REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, 1);
} }
void hubp31_soft_reset(struct hubp *hubp, bool reset) void hubp31_soft_reset(struct hubp *hubp, bool reset)


@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev; hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev); hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev); hwmgr->device = amdgpu_cgs_create_device(adev);
if (!hwmgr->device) {
kfree(hwmgr);
return -ENOMEM;
}
mutex_init(&hwmgr->msg_lock); mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family; hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type; hwmgr->chip_id = adev->asic_type;


@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan || if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo. (hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) || ucTachometerPulsesPerRevolution == 0) ||
speed == 0 || (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0; return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr); smu7_fan_ctrl_stop_smc_fan_control(hwmgr);


@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
int result = 0; int result = 0;
if (hwmgr->thermal_controller.fanInfo.bNoFan || if (hwmgr->thermal_controller.fanInfo.bNoFan ||
speed == 0 || (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return -1; return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);


@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
uint32_t tach_period, crystal_clock_freq; uint32_t tach_period, crystal_clock_freq;
int result = 0; int result = 0;
if (!speed) if (!speed || speed > UINT_MAX/8)
return -EINVAL; return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {


@ -1273,6 +1273,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500; uint32_t crystal_clock_freq = 2500;
uint32_t tach_period; uint32_t tach_period;
if (!speed || speed > UINT_MAX/8)
return -EINVAL;
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT), REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
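
The speed > UINT_MAX/8 checks added across these fan-control paths all guard the same conversion, tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed): speed == 0 would divide by zero, and 8 * speed is a 32-bit multiplication that wraps for very large RPM requests. A short sketch of the guarded conversion (rpm_to_tach_period() is a hypothetical helper, not the driver function):

#include <limits.h>
#include <stdio.h>

/* Reject 0 (division by zero) and anything above UINT_MAX/8, where the
 * 8 * speed term would wrap in 32-bit arithmetic. */
static int rpm_to_tach_period(unsigned int crystal_clock_freq,
                              unsigned int speed, unsigned int *tach_period)
{
        if (!speed || speed > UINT_MAX / 8)
                return -1;                      /* -EINVAL in the drivers */

        *tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
        return 0;
}

int main(void)
{
        unsigned int period;

        if (rpm_to_tach_period(2500, 3000, &period) == 0)
                printf("tach_period = %u\n", period);           /* 62500 */
        if (rpm_to_tach_period(2500, UINT_MAX / 4, &period))
                printf("rejected: 8 * speed would overflow\n");
        return 0;
}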


@ -1228,7 +1228,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500; uint32_t crystal_clock_freq = 2500;
uint32_t tach_period; uint32_t tach_period;
if (speed == 0) if (!speed || speed > UINT_MAX/8)
return -EINVAL; return -EINVAL;
/* /*
* To prevent from possible overheat, some ASICs may have requirement * To prevent from possible overheat, some ASICs may have requirement


@ -1265,7 +1265,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t tach_period; uint32_t tach_period;
int ret; int ret;
if (!speed) if (!speed || speed > UINT_MAX/8)
return -EINVAL; return -EINVAL;
ret = smu_v13_0_auto_fan_control(smu, 0); ret = smu_v13_0_auto_fan_control(smu, 0);


@ -1389,7 +1389,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode; mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode; adjusted_mode = &new_crtc_state->adjusted_mode;
if (!new_crtc_state->mode_changed) if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
continue; continue;
drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n", drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",


@ -49,7 +49,7 @@ static LIST_HEAD(panel_list);
* @dev: parent device of the panel * @dev: parent device of the panel
* @funcs: panel operations * @funcs: panel operations
* @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
* the panel interface * the panel interface (must NOT be DRM_MODE_CONNECTOR_Unknown)
* *
* Initialize the panel structure for subsequent registration with * Initialize the panel structure for subsequent registration with
* drm_panel_add(). * drm_panel_add().
@ -57,6 +57,9 @@ static LIST_HEAD(panel_list);
void drm_panel_init(struct drm_panel *panel, struct device *dev, void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs, int connector_type) const struct drm_panel_funcs *funcs, int connector_type)
{ {
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
DRM_WARN("%s: %s: a valid connector type is required!\n", __func__, dev_name(dev));
INIT_LIST_HEAD(&panel->list); INIT_LIST_HEAD(&panel->list);
panel->dev = dev; panel->dev = dev;
panel->funcs = funcs; panel->funcs = funcs;


@ -93,6 +93,12 @@ static const struct drm_dmi_panel_orientation_data onegx1_pro = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
}; };
static const struct drm_dmi_panel_orientation_data lcd640x960_leftside_up = {
.width = 640,
.height = 960,
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = { static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720, .width = 720,
.height = 1280, .height = 1280,
@ -123,6 +129,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
}; };
static const struct drm_dmi_panel_orientation_data lcd1200x1920_leftside_up = {
.width = 1200,
.height = 1920,
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200, .width = 1200,
.height = 1920, .height = 1920,
@ -184,10 +196,10 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
}, },
.driver_data = (void *)&lcd800x1280_rightside_up, .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* AYA NEO AYANEO 2 */ }, { /* AYA NEO AYANEO 2/2S */
.matches = { .matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"), DMI_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
}, },
.driver_data = (void *)&lcd1200x1920_rightside_up, .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* AYA NEO 2021 */ }, { /* AYA NEO 2021 */
@ -202,6 +214,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"), DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
}, },
.driver_data = (void *)&lcd1080x1920_leftside_up, .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO Flip DS Bottom Screen */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FLIP DS"),
},
.driver_data = (void *)&lcd640x960_leftside_up,
}, { /* AYA NEO Flip KB/DS Top Screen */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
DMI_MATCH(DMI_PRODUCT_NAME, "FLIP"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO Founder */ }, { /* AYA NEO Founder */
.matches = { .matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"), DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
@ -226,6 +250,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BOARD_NAME, "KUN"), DMI_MATCH(DMI_BOARD_NAME, "KUN"),
}, },
.driver_data = (void *)&lcd1600x2560_rightside_up, .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* AYA NEO SLIDE */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
DMI_MATCH(DMI_PRODUCT_NAME, "SLIDE"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYN Loki Max */ }, { /* AYN Loki Max */
.matches = { .matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"), DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
@ -315,6 +345,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
}, },
.driver_data = (void *)&gpd_win2, .driver_data = (void *)&gpd_win2,
}, { /* GPD Win 2 (correct DMI strings) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WIN2")
},
.driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* GPD Win 3 */ }, { /* GPD Win 3 */
.matches = { .matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
@ -443,6 +479,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
}, },
.driver_data = (void *)&lcd1600x2560_leftside_up, .driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* OneXPlayer Mini (Intel) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1200x1920_leftside_up,
}, { /* OrangePi Neo */ }, { /* OrangePi Neo */
.matches = { .matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"), DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),


@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
u8 *buf; u8 *buf;
struct opregion_header *header; struct opregion_header *header;
struct vbt v; struct vbt v;
const char opregion_signature[16] = OPREGION_SIGNATURE;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */ /* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va; buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf; header = (struct opregion_header *)buf;
memcpy(header->signature, opregion_signature,
sizeof(opregion_signature)); static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
header->size = 0x8; header->size = 0x8;
header->opregion_ver = 0x02000000; header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT; header->mboxes = MBOX_VBT;


@ -125,14 +125,14 @@ struct mtk_dpi_yc_limit {
* @is_ck_de_pol: Support CK/DE polarity. * @is_ck_de_pol: Support CK/DE polarity.
* @swap_input_support: Support input swap function. * @swap_input_support: Support input swap function.
* @support_direct_pin: IP supports direct connection to dpi panels. * @support_direct_pin: IP supports direct connection to dpi panels.
* @input_2pixel: Input pixel of dp_intf is 2 pixel per round, so enable this
* config to enable this feature.
* @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH * @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH
* (no shift). * (no shift).
* @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift). * @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift).
* @channel_swap_shift: Shift value of channel swap. * @channel_swap_shift: Shift value of channel swap.
* @yuv422_en_bit: Enable bit of yuv422. * @yuv422_en_bit: Enable bit of yuv422.
* @csc_enable_bit: Enable bit of CSC. * @csc_enable_bit: Enable bit of CSC.
* @input_2p_en_bit: Enable bit for input two pixel per round feature.
* If present, implies that the feature must be enabled.
* @pixels_per_iter: Quantity of transferred pixels per iteration. * @pixels_per_iter: Quantity of transferred pixels per iteration.
*/ */
struct mtk_dpi_conf { struct mtk_dpi_conf {
@ -145,12 +145,12 @@ struct mtk_dpi_conf {
bool is_ck_de_pol; bool is_ck_de_pol;
bool swap_input_support; bool swap_input_support;
bool support_direct_pin; bool support_direct_pin;
bool input_2pixel;
u32 dimension_mask; u32 dimension_mask;
u32 hvsize_mask; u32 hvsize_mask;
u32 channel_swap_shift; u32 channel_swap_shift;
u32 yuv422_en_bit; u32 yuv422_en_bit;
u32 csc_enable_bit; u32 csc_enable_bit;
u32 input_2p_en_bit;
u32 pixels_per_iter; u32 pixels_per_iter;
}; };
@ -463,6 +463,7 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
mtk_dpi_disable(dpi); mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk); clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->tvd_clk);
clk_disable_unprepare(dpi->engine_clk); clk_disable_unprepare(dpi->engine_clk);
} }
@ -479,6 +480,12 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_refcount; goto err_refcount;
} }
ret = clk_prepare_enable(dpi->tvd_clk);
if (ret) {
dev_err(dpi->dev, "Failed to enable tvd pll: %d\n", ret);
goto err_engine;
}
ret = clk_prepare_enable(dpi->pixel_clk); ret = clk_prepare_enable(dpi->pixel_clk);
if (ret) { if (ret) {
dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret); dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret);
@ -488,6 +495,8 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
return 0; return 0;
err_pixel: err_pixel:
clk_disable_unprepare(dpi->tvd_clk);
err_engine:
clk_disable_unprepare(dpi->engine_clk); clk_disable_unprepare(dpi->engine_clk);
err_refcount: err_refcount:
dpi->refcount--; dpi->refcount--;
@ -602,9 +611,9 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
mtk_dpi_dual_edge(dpi); mtk_dpi_dual_edge(dpi);
mtk_dpi_config_disable_edge(dpi); mtk_dpi_config_disable_edge(dpi);
} }
if (dpi->conf->input_2pixel) { if (dpi->conf->input_2p_en_bit) {
mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN, mtk_dpi_mask(dpi, DPI_CON, dpi->conf->input_2p_en_bit,
DPINTF_INPUT_2P_EN); dpi->conf->input_2p_en_bit);
} }
mtk_dpi_sw_reset(dpi, false); mtk_dpi_sw_reset(dpi, false);
@ -952,12 +961,12 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = {
.output_fmts = mt8195_output_fmts, .output_fmts = mt8195_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8195_output_fmts), .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
.pixels_per_iter = 4, .pixels_per_iter = 4,
.input_2pixel = true,
.dimension_mask = DPINTF_HPW_MASK, .dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK, .hvsize_mask = DPINTF_HSIZE_MASK,
.channel_swap_shift = DPINTF_CH_SWAP, .channel_swap_shift = DPINTF_CH_SWAP,
.yuv422_en_bit = DPINTF_YUV422_EN, .yuv422_en_bit = DPINTF_YUV422_EN,
.csc_enable_bit = DPINTF_CSC_ENABLE, .csc_enable_bit = DPINTF_CSC_ENABLE,
.input_2p_en_bit = DPINTF_INPUT_2P_EN,
}; };
static int mtk_dpi_probe(struct platform_device *pdev) static int mtk_dpi_probe(struct platform_device *pdev)


@ -1070,50 +1070,51 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val; u32 val;
int ret;
/* /*
* The GMU may still be in slumber unless the GPU started so check and * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when
* skip putting it back into slumber if so * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when
* required
*/ */
val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); if (adreno_gpu->base.needs_hw_init) {
if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
goto force_off;
if (val != 0xf) { a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
int ret = a6xx_gmu_wait_for_idle(gmu);
/* If the GMU isn't responding assume it is hung */
if (ret) {
a6xx_gmu_force_off(gmu);
return;
}
a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
/* tell the GMU we want to slumber */
ret = a6xx_gmu_notify_slumber(gmu);
if (ret) {
a6xx_gmu_force_off(gmu);
return;
}
ret = gmu_poll_timeout(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
100, 10000);
/*
* Let the user know we failed to slumber but don't worry too
* much because we are powering down anyway
*/
if (ret)
DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
} }
ret = a6xx_gmu_wait_for_idle(gmu);
/* If the GMU isn't responding assume it is hung */
if (ret)
goto force_off;
a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
/* tell the GMU we want to slumber */
ret = a6xx_gmu_notify_slumber(gmu);
if (ret)
goto force_off;
ret = gmu_poll_timeout(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
100, 10000);
/*
* Let the user know we failed to slumber but don't worry too
* much because we are powering down anyway
*/
if (ret)
DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
/* Turn off HFI */ /* Turn off HFI */
a6xx_hfi_stop(gmu); a6xx_hfi_stop(gmu);
@ -1122,6 +1123,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
/* Tell RPMh to power off the GPU */ /* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu); a6xx_rpmh_stop(gmu);
return;
force_off:
a6xx_gmu_force_off(gmu);
} }


@ -143,6 +143,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
nouveau_bo_del_io_reserve_lru(bo); nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL); nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
if (bo->base.import_attach)
drm_prime_gem_destroy(&bo->base, bo->sg);
/* /*
* If nouveau_bo_new() allocated this buffer, the GEM object was never * If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it. * initialized, so don't attempt to release it.


@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return; return;
} }
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
ttm_bo_put(&nvbo->bo); ttm_bo_put(&nvbo->bo);
pm_runtime_mark_last_busy(dev); pm_runtime_mark_last_busy(dev);


@ -7,8 +7,6 @@ sti-drm-y := \
sti_compositor.o \ sti_compositor.o \
sti_crtc.o \ sti_crtc.o \
sti_plane.o \ sti_plane.o \
sti_crtc.o \
sti_plane.o \
sti_hdmi.o \ sti_hdmi.o \
sti_hdmi_tx3g4c28phy.o \ sti_hdmi_tx3g4c28phy.o \
sti_dvo.o \ sti_dvo.o \


@ -455,7 +455,7 @@ static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value,
enum repaper_stage stage) enum repaper_stage stage)
{ {
u64 start = local_clock(); u64 start = local_clock();
u64 end = start + (epd->factored_stage_time * 1000 * 1000); u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
do { do {
repaper_frame_fixed(epd, fixed_value, stage); repaper_frame_fixed(epd, fixed_value, stage);
@ -466,7 +466,7 @@ static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image,
const u8 *mask, enum repaper_stage stage) const u8 *mask, enum repaper_stage stage)
{ {
u64 start = local_clock(); u64 start = local_clock();
u64 end = start + (epd->factored_stage_time * 1000 * 1000); u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000);
do { do {
repaper_frame_data(epd, image, mask, stage); repaper_frame_data(epd, image, mask, stage);
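
The repaper change is a 32-bit multiplication overflow fix: factored_stage_time * 1000 * 1000 is evaluated in 32-bit arithmetic before being assigned to the 64-bit deadline, so stage times above roughly 4294 ms would wrap; casting one operand to u64 first forces the whole multiplication into 64 bits. A short demonstration (assumes the usual 32-bit unsigned int):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t stage_ms = 5000;       /* > 4294 ms, so ms * 1e6 exceeds 2^32 */

        uint64_t wrong = stage_ms * 1000 * 1000;            /* 32-bit multiply, wraps     */
        uint64_t right = (uint64_t)stage_ms * 1000 * 1000;  /* widened before multiplying */

        printf("wrong = %" PRIu64 "\n", wrong);   /* 705032704 on 32-bit unsigned int */
        printf("right = %" PRIu64 "\n", right);   /* 5000000000                       */
        return 0;
}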


@ -21,6 +21,7 @@
#include "usbhid.h" #include "usbhid.h"
#define PID_EFFECTS_MAX 64 #define PID_EFFECTS_MAX 64
#define PID_INFINITE 0xffff
/* Report usage table used to put reports into an array */ /* Report usage table used to put reports into an array */
@ -261,10 +262,22 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
static int pidff_needs_set_envelope(struct ff_envelope *envelope, static int pidff_needs_set_envelope(struct ff_envelope *envelope,
struct ff_envelope *old) struct ff_envelope *old)
{ {
return envelope->attack_level != old->attack_level || bool needs_new_envelope;
envelope->fade_level != old->fade_level || needs_new_envelope = envelope->attack_level != 0 ||
envelope->fade_level != 0 ||
envelope->attack_length != 0 ||
envelope->fade_length != 0;
if (!needs_new_envelope)
return false;
if (!old)
return needs_new_envelope;
return envelope->attack_level != old->attack_level ||
envelope->fade_level != old->fade_level ||
envelope->attack_length != old->attack_length || envelope->attack_length != old->attack_length ||
envelope->fade_length != old->fade_length; envelope->fade_length != old->fade_length;
} }
/* /*
@ -301,7 +314,12 @@ static void pidff_set_effect_report(struct pidff_device *pidff,
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
pidff->set_effect_type->value[0] = pidff->set_effect_type->value[0] =
pidff->create_new_effect_type->value[0]; pidff->create_new_effect_type->value[0];
pidff->set_effect[PID_DURATION].value[0] = effect->replay.length;
/* Convert infinite length from Linux API (0)
to PID standard (NULL) if needed */
pidff->set_effect[PID_DURATION].value[0] =
effect->replay.length == 0 ? PID_INFINITE : effect->replay.length;
pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button; pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button;
pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] =
effect->trigger.interval; effect->trigger.interval;
@ -574,11 +592,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect); pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_constant(effect, old)) if (!old || pidff_needs_set_constant(effect, old))
pidff_set_constant_force_report(pidff, effect); pidff_set_constant_force_report(pidff, effect);
if (!old || if (pidff_needs_set_envelope(&effect->u.constant.envelope,
pidff_needs_set_envelope(&effect->u.constant.envelope, old ? &old->u.constant.envelope : NULL))
&old->u.constant.envelope)) pidff_set_envelope_report(pidff, &effect->u.constant.envelope);
pidff_set_envelope_report(pidff,
&effect->u.constant.envelope);
break; break;
case FF_PERIODIC: case FF_PERIODIC:
@ -613,11 +629,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect); pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_periodic(effect, old)) if (!old || pidff_needs_set_periodic(effect, old))
pidff_set_periodic_report(pidff, effect); pidff_set_periodic_report(pidff, effect);
if (!old || if (pidff_needs_set_envelope(&effect->u.periodic.envelope,
pidff_needs_set_envelope(&effect->u.periodic.envelope, old ? &old->u.periodic.envelope : NULL))
&old->u.periodic.envelope)) pidff_set_envelope_report(pidff, &effect->u.periodic.envelope);
pidff_set_envelope_report(pidff,
&effect->u.periodic.envelope);
break; break;
case FF_RAMP: case FF_RAMP:
@ -631,11 +645,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect); pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_ramp(effect, old)) if (!old || pidff_needs_set_ramp(effect, old))
pidff_set_ramp_force_report(pidff, effect); pidff_set_ramp_force_report(pidff, effect);
if (!old || if (pidff_needs_set_envelope(&effect->u.ramp.envelope,
pidff_needs_set_envelope(&effect->u.ramp.envelope, old ? &old->u.ramp.envelope : NULL))
&old->u.ramp.envelope)) pidff_set_envelope_report(pidff, &effect->u.ramp.envelope);
pidff_set_envelope_report(pidff,
&effect->u.ramp.envelope);
break; break;
case FF_SPRING: case FF_SPRING:
@ -758,6 +770,11 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
static int pidff_find_fields(struct pidff_usage *usage, const u8 *table, static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
struct hid_report *report, int count, int strict) struct hid_report *report, int count, int strict)
{ {
if (!report) {
pr_debug("pidff_find_fields, null report\n");
return -1;
}
int i, j, k, found; int i, j, k, found;
for (k = 0; k < count; k++) { for (k = 0; k < count; k++) {
@ -871,6 +888,11 @@ static int pidff_reports_ok(struct pidff_device *pidff)
static struct hid_field *pidff_find_special_field(struct hid_report *report, static struct hid_field *pidff_find_special_field(struct hid_report *report,
int usage, int enforce_min) int usage, int enforce_min)
{ {
if (!report) {
pr_debug("pidff_find_special_field, null report\n");
return NULL;
}
int i; int i;
for (i = 0; i < report->maxfield; i++) { for (i = 0; i < report->maxfield; i++) {


@ -403,6 +403,7 @@ static void ssip_reset(struct hsi_client *cl)
del_timer(&ssi->rx_wd); del_timer(&ssi->rx_wd);
del_timer(&ssi->tx_wd); del_timer(&ssi->tx_wd);
del_timer(&ssi->keep_alive); del_timer(&ssi->keep_alive);
cancel_work_sync(&ssi->work);
ssi->main_state = 0; ssi->main_state = 0;
ssi->send_state = 0; ssi->send_state = 0;
ssi->recv_state = 0; ssi->recv_state = 0;


@ -247,6 +247,9 @@ static int ec_i2c_probe(struct platform_device *pdev)
u32 remote_bus; u32 remote_bus;
int err; int err;
if (!ec)
return dev_err_probe(dev, -EPROBE_DEFER, "couldn't find parent EC device\n");
if (!ec->cmd_xfer) { if (!ec->cmd_xfer) {
dev_err(dev, "Missing sendrecv\n"); dev_err(dev, "Missing sendrecv\n");
return -EINVAL; return -EINVAL;


@ -2495,6 +2495,9 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
*/ */
void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot) void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
{ {
if (!dev->ibi || !slot)
return;
atomic_inc(&dev->ibi->pending_ibis); atomic_inc(&dev->ibi->pending_ibis);
queue_work(dev->common.master->wq, &slot->work); queue_work(dev->common.master->wq, &slot->work);
} }


@ -368,7 +368,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
slot->len < SVC_I3C_FIFO_SIZE) { slot->len < SVC_I3C_FIFO_SIZE) {
mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL); mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl); count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
readsl(master->regs + SVC_I3C_MRDATAB, buf, count); readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
slot->len += count; slot->len += count;
buf += count; buf += count;
} }
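
The readsl() to readsb() swap matters because the count taken from the RX FIFO level is a byte count: repeating a 32-bit read count times would pull roughly four times as much data out of the FIFO as intended. A tiny illustration of the unit mismatch, with plain size arithmetic standing in for the MMIO string accessors:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        size_t count = 12;      /* FIFO level reported in bytes */

        size_t byte_total = count * sizeof(uint8_t);    /* readsb-style: 12 bytes, as intended */
        size_t word_total = count * sizeof(uint32_t);   /* readsl-style: 48 bytes, 4x overread */

        printf("byte reads: %zu bytes, word reads: %zu bytes\n",
               byte_total, word_total);
        return 0;
}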

Some files were not shown because too many files have changed in this diff.