Add the mitigation logic for Transient Scheduler Attacks (TSA)

TSA are new speculative side channel attacks related to the execution
 timing of instructions under specific microarchitectural conditions. In
 some cases, an attacker may be able to use this timing information to
 infer data from other contexts, resulting in information leakage.
 
 Add the usual controls for the mitigation and integrate it into the
 existing speculation bugs infrastructure in the kernel.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmhSsvQACgkQEsHwGGHe
 VUrWNw//V+ZabYq3Nnvh4jEe6Altobnpn8bOIWmcBx6I3xuuArb9bLqcbKerDIcC
 POVVW6zrdNigDe/U4aqaJXE7qCRX55uTYbhp8OLH0zzqX3Pjl/hUnEXWtMtlXj/G
 CIM5mqjqEFp5JRGXetdjjuvjG1IPf+CbjKqj2WXbi//T6F3LiAFxkzdUhd+clBF/
 ztWchjwUmqU0WJd6+Smb8ZnvWrLoZuOFldjhFad820B7fqkdJhzjHMmwBHJKUEZu
 oABv8B0/4IALrx6LenCspWS4OuTOGG7DKyIgzitByXygXXb4L3ZUKpuqkxBU7hFx
 bscwtOP7e5HIYAekx6ZSLZoZpYQXr1iH0aRGrjwapi3ASIpUwI0UA9ck2PdGo0IY
 0GvmN0vbybskewBQyG819BM+DCau5pOLWuL7cYmaD2eTNoOHOknMDNlO8VzXqJxa
 NnignSuEWFm2vNV1FXEav2YbVjlanV6JleiPDGBe5Xd9dnxZTvg9HuP2NkYio4dZ
 mb/kEU/kTcN8nWh0Q96tX45kmj0vCbBgrSQkmUpyAugp38n69D1tp3ii9D/hyQFH
 hKGcFC9m+rYVx1NLyAxhTGxaEqF801d5Qawwud8HsnQudTpCdSXD9fcBg9aCbWEa
 FymtDpIeUQrFAjDpVEp6Syh3odKvLXsGEzL+DVvqKDuA8r6DxFo=
 =2cLl
 -----END PGP SIGNATURE-----

Merge tag 'tsa_x86_bugs_for_6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull CPU speculation fixes from Borislav Petkov:
 "Add the mitigation logic for Transient Scheduler Attacks (TSA)

  TSA are new speculative side channel attacks related to the execution
  timing of instructions under specific microarchitectural conditions.
  In some cases, an attacker may be able to use this timing information
  to infer data from other contexts, resulting in information leakage.

  Add the usual controls for the mitigation and integrate it into the
  existing speculation bugs infrastructure in the kernel"

* tag 'tsa_x86_bugs_for_6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/process: Move the buffer clearing before MONITOR
  x86/microcode/AMD: Add TSA microcode SHAs
  KVM: SVM: Advertise TSA CPUID bits to guests
  x86/bugs: Add a Transient Scheduler Attacks mitigation
  x86/bugs: Rename MDS machinery to something more generic
Linus Torvalds 2025-07-07 17:08:36 -07:00
commit 6e9128ff9d
23 changed files with 418 additions and 53 deletions

View File

@@ -584,6 +584,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsa
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
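
The new sysfs file reports the mitigation status to userspace, one line per read, like the other entries in this directory. A minimal C sketch for reading it (not part of the patch; only the path and the reported strings, e.g. "Mitigation: Clear CPU buffers", come from this series):

    #include <stdio.h>

    int main(void)
    {
            char line[128];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsa", "r");

            if (!f) {
                    perror("tsa");  /* pre-6.16 kernel, or not x86 */
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }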

View File

@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
combination with a microcode update. The microcode clears the affected CPU
buffers when the VERW instruction is executed.
-Kernel reuses the MDS function to invoke the buffer clearing:
-mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().
On MDS affected CPUs, the kernel already invokes CPU buffer clear on
kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

View File

@@ -7488,6 +7488,19 @@
having this key zero'ed is acceptable. E.g. in testing
scenarios.
tsa= [X86] Control mitigation for Transient Scheduler
Attacks on AMD CPUs. Search the following in your
favourite search engine for more details:
"Technical guidance for mitigating transient scheduler
attacks".
off - disable the mitigation
on - enable the mitigation (default)
user - mitigate only user/kernel transitions
vm - mitigate only guest/host transitions
tsc= Disable clocksource stability checks for TSC.
Format: <string>
[x86] reliable: mark tsc clocksource as reliable, this
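
As an illustration, mitigating only user/kernel transitions means booting with the "user" token documented above (hypothetical grub fragment; only the tsa= syntax is defined by this patch):

    # /etc/default/grub
    GRUB_CMDLINE_LINUX_DEFAULT="quiet tsa=user"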

View File

@@ -93,7 +93,7 @@ enters a C-state.
The kernel provides a function to invoke the buffer clearing:
-mds_clear_cpu_buffers()
+x86_clear_cpu_buffers()
Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@ Mitigation points
idle clearing would be a window dressing exercise and is therefore not
activated.
-The invocation is controlled by the static key mds_idle_clear which is
-switched depending on the chosen mitigation mode and the SMT state of
-the system.
+The invocation is controlled by the static key cpu_buf_idle_clear which is
+switched depending on the chosen mitigation mode and the SMT state of the
+system.
The buffer clear is only invoked before entering the C-State to prevent
that stale data from the idling CPU from spilling to the Hyper-Thread

View File

@@ -2695,6 +2695,15 @@ config MITIGATION_ITS
disabled, mitigation cannot be enabled via cmdline.
See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
config MITIGATION_TSA
bool "Mitigate Transient Scheduler Attacks"
depends on CPU_SUP_AMD
default y
help
Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
security vulnerability on AMD CPUs which can lead to forwarding of
invalid info to subsequent instructions and thus can affect their
timing and thereby cause a leakage.
endif
config ARCH_HAS_ADD_PAGES
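
Because MITIGATION_TSA defaults to y, opting out at build time is a single config line (illustrative .config fragment; the symbol name is from the hunk above):

    # CONFIG_MITIGATION_TSA is not set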

View File

@@ -36,20 +36,20 @@ EXPORT_SYMBOL_GPL(write_ibpb);
/*
* Define the VERW operand that is disguised as entry code so that
-* it can be referenced with KPTI enabled. This ensure VERW can be
+* it can be referenced with KPTI enabled. This ensures VERW can be
* used late in exit-to-user path after page tables are switched.
*/
.pushsection .entry.text, "ax"
.align L1_CACHE_BYTES, 0xcc
-SYM_CODE_START_NOALIGN(mds_verw_sel)
+SYM_CODE_START_NOALIGN(x86_verw_sel)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
.word __KERNEL_DS
.align L1_CACHE_BYTES, 0xcc
-SYM_CODE_END(mds_verw_sel);
+SYM_CODE_END(x86_verw_sel);
/* For KVM */
-EXPORT_SYMBOL_GPL(mds_verw_sel);
+EXPORT_SYMBOL_GPL(x86_verw_sel);
.popsection

View File

@@ -456,6 +456,7 @@
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */
@@ -487,6 +488,9 @@
#define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */
#define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
/*
* BUG word(s)
@@ -542,5 +546,5 @@
#define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
#endif /* _ASM_X86_CPUFEATURES_H */

View File

@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
static __always_inline void native_safe_halt(void)
{
-mds_idle_clear_cpu_buffers();
+x86_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
}
static __always_inline void native_halt(void)
{
-mds_idle_clear_cpu_buffers();
+x86_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory");
}

View File

@@ -764,6 +764,7 @@ enum kvm_only_cpuid_leafs {
CPUID_8000_0022_EAX,
CPUID_7_2_EDX,
CPUID_24_0_EBX,
CPUID_8000_0021_ECX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,

View File

@@ -43,8 +43,6 @@ static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
static __always_inline void __mwait(u32 eax, u32 ecx)
{
-mds_idle_clear_cpu_buffers();
/*
* Use the instruction mnemonic with implicit operands, as the LLVM
* assembler fails to assemble the mnemonic with explicit operands:
@@ -80,7 +78,7 @@ static __always_inline void __mwait(u32 eax, u32 ecx)
*/
static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
{
-/* No MDS buffer clear as this is AMD/HYGON only */
+/* No need for TSA buffer clearing on AMD */
/* "mwaitx %eax, %ebx, %ecx" */
asm volatile(".byte 0x0f, 0x01, 0xfb"
@@ -98,7 +96,6 @@ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
*/
static __always_inline void __sti_mwait(u32 eax, u32 ecx)
{
-mds_idle_clear_cpu_buffers();
asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
}
@@ -115,21 +112,29 @@ static __always_inline void __sti_mwait(u32 eax, u32 ecx)
*/
static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx)
{
+if (need_resched())
+return;
+x86_idle_clear_cpu_buffers();
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags;
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0);
-if (!need_resched()) {
-if (ecx & 1) {
-__mwait(eax, ecx);
-} else {
-__sti_mwait(eax, ecx);
-raw_local_irq_disable();
-}
+if (need_resched())
+goto out;
+if (ecx & 1) {
+__mwait(eax, ecx);
+} else {
+__sti_mwait(eax, ecx);
+raw_local_irq_disable();
+}
}
+out:
current_clr_polling();
}

View File

@@ -302,25 +302,31 @@
.endm
/*
-* Macro to execute VERW instruction that mitigate transient data sampling
-* attacks such as MDS. On affected systems a microcode update overloaded VERW
-* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
*
+* Macro to execute VERW insns that mitigate transient data sampling
+* attacks such as MDS or TSA. On affected systems a microcode update
+* overloaded VERW insns to also clear the CPU buffers. VERW clobbers
+* CFLAGS.ZF.
* Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
-.macro CLEAR_CPU_BUFFERS
+.macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64
-ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
#else
/*
* In 32bit mode, the memory operand must be a %cs reference. The data
* segments may not be usable (vm86 mode), and the stack segment may not
* be flat (ESPFIX32).
*/
-ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
#endif
.endm
#define CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
#define VM_CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
#ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY
ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
@@ -567,24 +573,24 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
-extern u16 mds_verw_sel;
+extern u16 x86_verw_sel;
#include <asm/segment.h>
/**
-* mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+* x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
*
* This uses the otherwise unused and obsolete VERW instruction in
* combination with microcode which triggers a CPU buffer flush when the
* instruction is executed.
*/
-static __always_inline void mds_clear_cpu_buffers(void)
+static __always_inline void x86_clear_cpu_buffers(void)
{
static const u16 ds = __KERNEL_DS;
@@ -601,14 +607,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
}
/**
-* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+* x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+* and TSA vulnerabilities.
*
* Clear CPU buffers if the corresponding static key is enabled
*/
-static __always_inline void mds_idle_clear_cpu_buffers(void)
+static __always_inline void x86_idle_clear_cpu_buffers(void)
{
-if (static_branch_likely(&mds_idle_clear))
-mds_clear_cpu_buffers();
+if (static_branch_likely(&cpu_buf_idle_clear))
+x86_clear_cpu_buffers();
}
#endif /* __ASSEMBLER__ */
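
The "memory operand variant" note above is the heart of the mechanism: VERW takes a 16-bit segment selector, and with the MDS/TSA microcode loaded the memory form additionally flushes the affected CPU buffers. A freestanding sketch of the idiom (the function name and the 0x10 placeholder selector are illustrative; the kernel uses __KERNEL_DS, as the hunk above shows):

    #include <stdint.h>

    static inline void verw_clear_cpu_buffers(void)
    {
            /* Any valid data segment selector; 0x10 is a placeholder. */
            static const uint16_t ds = 0x10;

            /* Memory-operand VERW; clobbers only EFLAGS.ZF. */
            asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }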

View File

@@ -377,6 +377,47 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c)
#endif
}
#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
step, step, ucode)
static const struct x86_cpu_id amd_tsa_microcode[] = {
ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
{},
};
static void tsa_init(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return;
if (cpu_has(c, X86_FEATURE_ZEN3) ||
cpu_has(c, X86_FEATURE_ZEN4)) {
if (x86_match_min_microcode_rev(amd_tsa_microcode))
setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
else
pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
} else {
setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
}
}
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -489,6 +530,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
}
bsp_determine_snp(c);
tsa_init(c);
return;
warn:
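
In short, tsa_init() sets X86_FEATURE_VERW_CLEAR on Zen 3/4 parts only once the per-model minimum microcode revision from the table is loaded; other (non-hypervisor) parts reaching this code get TSA_SQ_NO/TSA_L1_NO force-set instead. A self-contained restatement of the revision check (hypothetical helper; the revisions are copied from amd_tsa_microcode above):

    #include <stddef.h>
    #include <stdint.h>

    struct min_rev { uint8_t model, stepping; uint32_t rev; };

    /* Family 0x19 minimum revisions from the amd_tsa_microcode table. */
    static const struct min_rev tsa_min_rev[] = {
            { 0x01, 0x1, 0x0a0011d7 }, { 0x01, 0x2, 0x0a00123b },
            { 0x08, 0x2, 0x0a00820d }, { 0x11, 0x1, 0x0a10114c },
            { 0x11, 0x2, 0x0a10124c }, { 0x18, 0x1, 0x0a108109 },
            { 0x21, 0x0, 0x0a20102e }, { 0x21, 0x2, 0x0a201211 },
            { 0x44, 0x1, 0x0a404108 }, { 0x50, 0x0, 0x0a500012 },
            { 0x61, 0x2, 0x0a60120a }, { 0x74, 0x1, 0x0a704108 },
            { 0x75, 0x2, 0x0a705208 }, { 0x78, 0x0, 0x0a708008 },
            { 0x7c, 0x0, 0x0a70c008 }, { 0xa0, 0x2, 0x0aa00216 },
    };

    /* Is the loaded microcode at least the TSA-fixed revision? */
    static int tsa_ucode_ok(uint8_t model, uint8_t stepping, uint32_t rev)
    {
            for (size_t i = 0; i < sizeof(tsa_min_rev) / sizeof(tsa_min_rev[0]); i++)
                    if (tsa_min_rev[i].model == model &&
                        tsa_min_rev[i].stepping == stepping)
                            return rev >= tsa_min_rev[i].rev;
            return 0;   /* unknown model/stepping: assume not fixed */
    }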

View File

@@ -94,6 +94,8 @@ static void __init bhi_apply_mitigation(void);
static void __init its_select_mitigation(void);
static void __init its_update_mitigation(void);
static void __init its_apply_mitigation(void);
static void __init tsa_select_mitigation(void);
static void __init tsa_apply_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@ -169,9 +171,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
-/* Control MDS CPU buffer clear before idling (halt, mwait) */
-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
-EXPORT_SYMBOL_GPL(mds_idle_clear);
+/* Control CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
/*
* Controls whether l1d flush based mitigations are enabled,
@@ -225,6 +227,7 @@ void __init cpu_select_mitigations(void)
gds_select_mitigation();
its_select_mitigation();
bhi_select_mitigation();
tsa_select_mitigation();
/*
* After mitigations are selected, some may need to update their
@@ -272,6 +275,7 @@ void __init cpu_select_mitigations(void)
gds_apply_mitigation();
its_apply_mitigation();
bhi_apply_mitigation();
tsa_apply_mitigation();
}
/*
@@ -637,7 +641,7 @@ static void __init mmio_apply_mitigation(void)
* is required irrespective of SMT state.
*/
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
-static_branch_enable(&mds_idle_clear);
+static_branch_enable(&cpu_buf_idle_clear);
if (mmio_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
@@ -1487,6 +1491,94 @@ static void __init its_apply_mitigation(void)
set_return_thunk(its_return_thunk);
}
#undef pr_fmt
#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
enum tsa_mitigations {
TSA_MITIGATION_NONE,
TSA_MITIGATION_AUTO,
TSA_MITIGATION_UCODE_NEEDED,
TSA_MITIGATION_USER_KERNEL,
TSA_MITIGATION_VM,
TSA_MITIGATION_FULL,
};
static const char * const tsa_strings[] = {
[TSA_MITIGATION_NONE] = "Vulnerable",
[TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
[TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
[TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
[TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
};
static enum tsa_mitigations tsa_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
static int __init tsa_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "off"))
tsa_mitigation = TSA_MITIGATION_NONE;
else if (!strcmp(str, "on"))
tsa_mitigation = TSA_MITIGATION_FULL;
else if (!strcmp(str, "user"))
tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
else if (!strcmp(str, "vm"))
tsa_mitigation = TSA_MITIGATION_VM;
else
pr_err("Ignoring unknown tsa=%s option.\n", str);
return 0;
}
early_param("tsa", tsa_parse_cmdline);
static void __init tsa_select_mitigation(void)
{
if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
tsa_mitigation = TSA_MITIGATION_NONE;
return;
}
if (tsa_mitigation == TSA_MITIGATION_NONE)
return;
if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) {
tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
goto out;
}
if (tsa_mitigation == TSA_MITIGATION_AUTO)
tsa_mitigation = TSA_MITIGATION_FULL;
/*
* No need to set verw_clear_cpu_buf_mitigation_selected - it
* doesn't fit all cases here and it is not needed because this
* is the only VERW-based mitigation on AMD.
*/
out:
pr_info("%s\n", tsa_strings[tsa_mitigation]);
}
static void __init tsa_apply_mitigation(void)
{
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
break;
case TSA_MITIGATION_VM:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
case TSA_MITIGATION_FULL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
default:
break;
}
}
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
@@ -2249,10 +2341,10 @@ static void update_mds_branch_idle(void)
return;
if (sched_smt_active()) {
-static_branch_enable(&mds_idle_clear);
+static_branch_enable(&cpu_buf_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
-static_branch_disable(&mds_idle_clear);
+static_branch_disable(&cpu_buf_idle_clear);
}
}
@@ -2316,6 +2408,25 @@ void cpu_bugs_smt_update(void)
break;
}
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
case TSA_MITIGATION_VM:
case TSA_MITIGATION_AUTO:
case TSA_MITIGATION_FULL:
/*
* TSA-SQ can potentially lead to info leakage between
* SMT threads.
*/
if (sched_smt_active())
static_branch_enable(&cpu_buf_idle_clear);
else
static_branch_disable(&cpu_buf_idle_clear);
break;
case TSA_MITIGATION_NONE:
case TSA_MITIGATION_UCODE_NEEDED:
break;
}
mutex_unlock(&spec_ctrl_mutex);
}
@@ -3265,6 +3376,11 @@ static ssize_t gds_show_state(char *buf)
return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}
static ssize_t tsa_show_state(char *buf)
{
return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -3328,6 +3444,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_ITS:
return its_show_state(buf);
case X86_BUG_TSA:
return tsa_show_state(buf);
default:
break;
}
@@ -3414,6 +3533,11 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
{
return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}
ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}
#endif
void __warn_thunk(void)

View File

@@ -1233,6 +1233,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define ITS BIT(8)
/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
#define ITS_NATIVE_ONLY BIT(9)
/* CPU is affected by Transient Scheduler Attacks */
#define TSA BIT(10)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS),
@@ -1280,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
-VULNBL_AMD(0x19, SRSO),
+VULNBL_AMD(0x19, SRSO | TSA),
VULNBL_AMD(0x1a, SRSO),
{}
};
@@ -1530,6 +1532,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
}
if (c->x86_vendor == X86_VENDOR_AMD) {
if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
!cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
if (cpu_matches(cpu_vuln_blacklist, TSA) ||
/* Enable bug on Zen guests to allow for live migration. */
(cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
setup_force_cpu_bug(X86_BUG_TSA);
}
}
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
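
Condensed, the rule added above: an AMD CPU is flagged with X86_BUG_TSA if it does not enumerate both TSA_SQ_NO and TSA_L1_NO, and it is either a blacklisted family 0x19 part or any Zen CPU running as a guest (kept vulnerable-by-default so live migration stays safe). A standalone restatement (illustrative predicate, not kernel code; the inputs mirror the cpu_has()/cpu_matches() tests):

    static int cpu_affected_by_tsa(int tsa_sq_no, int tsa_l1_no,
                                   int blacklisted, int hypervisor, int zen)
    {
            if (tsa_sq_no && tsa_l1_no)
                    return 0;       /* both leak sources enumerated as fixed */

            return blacklisted || (hypervisor && zen);
    }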

View File

@@ -231,6 +231,13 @@ static const struct patch_digest phashes[] = {
0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
}
},
{ 0xa0011d7, {
0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b,
0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12,
0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2,
0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36,
}
},
{ 0xa001223, {
0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
@@ -294,6 +301,13 @@ static const struct patch_digest phashes[] = {
0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
}
},
{ 0xa00123b, {
0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2,
0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3,
0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28,
0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72,
}
},
{ 0xa00820c, {
0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
@@ -301,6 +315,13 @@ static const struct patch_digest phashes[] = {
0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
}
},
{ 0xa00820d, {
0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4,
0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca,
0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1,
0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7,
}
},
{ 0xa10113e, {
0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
@@ -322,6 +343,13 @@ static const struct patch_digest phashes[] = {
0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
}
},
{ 0xa10114c, {
0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64,
0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74,
0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a,
0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0,
}
},
{ 0xa10123e, {
0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
@@ -343,6 +371,13 @@ static const struct patch_digest phashes[] = {
0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
}
},
{ 0xa10124c, {
0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90,
0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46,
0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc,
0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2,
}
},
{ 0xa108108, {
0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
@@ -350,6 +385,13 @@ static const struct patch_digest phashes[] = {
0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
}
},
{ 0xa108109, {
0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa,
0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b,
0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35,
0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59,
}
},
{ 0xa20102d, {
0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
@@ -357,6 +399,13 @@ static const struct patch_digest phashes[] = {
0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
}
},
{ 0xa20102e, {
0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd,
0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6,
0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5,
0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe,
}
},
{ 0xa201210, {
0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
@@ -364,6 +413,13 @@ static const struct patch_digest phashes[] = {
0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
}
},
{ 0xa201211, {
0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95,
0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27,
0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff,
0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27,
}
},
{ 0xa404107, {
0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
@@ -371,6 +427,13 @@ static const struct patch_digest phashes[] = {
0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
}
},
{ 0xa404108, {
0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc,
0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb,
0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19,
0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00,
}
},
{ 0xa500011, {
0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
@@ -378,6 +441,13 @@ static const struct patch_digest phashes[] = {
0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
}
},
{ 0xa500012, {
0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4,
0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16,
0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f,
0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63,
}
},
{ 0xa601209, {
0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
@@ -385,6 +455,13 @@ static const struct patch_digest phashes[] = {
0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
}
},
{ 0xa60120a, {
0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d,
0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b,
0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d,
0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c,
}
},
{ 0xa704107, {
0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
@@ -392,6 +469,13 @@ static const struct patch_digest phashes[] = {
0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
}
},
{ 0xa704108, {
0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93,
0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98,
0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49,
0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a,
}
},
{ 0xa705206, {
0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
@@ -399,6 +483,13 @@ static const struct patch_digest phashes[] = {
0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
}
},
{ 0xa705208, {
0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19,
0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2,
0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11,
0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff,
}
},
{ 0xa708007, {
0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
@@ -406,6 +497,13 @@ static const struct patch_digest phashes[] = {
0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
}
},
{ 0xa708008, {
0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46,
0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab,
0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16,
0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b,
}
},
{ 0xa70c005, {
0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
@@ -413,6 +511,13 @@ static const struct patch_digest phashes[] = {
0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
}
},
{ 0xa70c008, {
0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21,
0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e,
0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66,
0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0,
}
},
{ 0xaa00116, {
0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
@@ -441,4 +546,11 @@ static const struct patch_digest phashes[] = {
0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
}
},
{ 0xaa00216, {
0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5,
0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08,
0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2,
0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1,
}
},
};

View File

@@ -50,6 +50,8 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
{ X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 },
{ X86_FEATURE_AMD_WORKLOAD_CLASS, CPUID_EAX, 22, 0x80000021, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
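
The two "not vulnerable" bits live in CPUID leaf 0x80000021, ECX (bit 1 for TSA-SQ, bit 2 for TSA-L1), as scattered above, so they are easy to inspect from userspace too. A minimal sketch using GCC/clang's <cpuid.h> (the output format is illustrative):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx))
                    return 1;       /* leaf not available */

            printf("TSA_SQ_NO: %u\n", (ecx >> 1) & 1);
            printf("TSA_L1_NO: %u\n", (ecx >> 2) & 1);
            return 0;
    }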

View File

@@ -907,16 +907,24 @@ static __init bool prefer_mwait_c1_over_halt(void)
*/
static __cpuidle void mwait_idle(void)
{
+if (need_resched())
+return;
+x86_idle_clear_cpu_buffers();
if (!current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags;
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0);
-if (!need_resched()) {
-__sti_mwait(0, 0);
-raw_local_irq_disable();
-}
+if (need_resched())
+goto out;
+__sti_mwait(0, 0);
+raw_local_irq_disable();
}
+out:
__current_clr_polling();
}

View File

@@ -1165,6 +1165,8 @@ void kvm_set_cpu_caps(void)
*/
SYNTHESIZED_F(LFENCE_RDTSC),
/* SmmPgCfgLock */
/* 4: Resv */
SYNTHESIZED_F(VERW_CLEAR),
F(NULL_SEL_CLR_BASE),
/* UpperAddressIgnore */
F(AUTOIBRS),
@@ -1179,6 +1181,11 @@ void kvm_set_cpu_caps(void)
F(SRSO_USER_KERNEL_NO),
);
kvm_cpu_cap_init(CPUID_8000_0021_ECX,
SYNTHESIZED_F(TSA_SQ_NO),
SYNTHESIZED_F(TSA_L1_NO),
);
kvm_cpu_cap_init(CPUID_8000_0022_EAX,
F(PERFMON_V2),
);
@@ -1748,8 +1755,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x80000021:
-entry->ebx = entry->ecx = entry->edx = 0;
+entry->ebx = entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0021_EAX);
cpuid_entry_override(entry, CPUID_8000_0021_ECX);
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {

View File

@@ -52,6 +52,10 @@
/* CPUID level 0x80000022 (EAX) */
#define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)
/* CPUID level 0x80000021 (ECX) */
#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1)
#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2)
struct cpuid_reg {
u32 function;
u32 index;
@@ -82,6 +86,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
[CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX},
[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
};
/*
@@ -121,6 +126,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
default:
return x86_feature;
}

View File

@@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run)
#endif
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */
3: vmrun %_ASM_AX
4:
@@ -335,6 +338,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov SVM_current_vmcb(%rdi), %rax
mov KVM_VMCB_pa(%rax), %rax
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */
1: vmrun %rax
2:

View File

@@ -7291,7 +7291,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
-mds_clear_cpu_buffers();
+x86_clear_cpu_buffers();
vmx_disable_fb_clear(vmx);

View File

@@ -602,6 +602,7 @@ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(ghostwrite);
CPU_SHOW_VULN_FALLBACK(old_microcode);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
CPU_SHOW_VULN_FALLBACK(tsa);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -620,6 +621,7 @@ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling
static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -639,6 +641,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_ghostwrite.attr,
&dev_attr_old_microcode.attr,
&dev_attr_indirect_target_selection.attr,
&dev_attr_tsa.attr,
NULL
};

View File

@@ -82,6 +82,7 @@ extern ssize_t cpu_show_old_microcode(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,