Subject: [Patch v2 3/4] x86/speculation: Extend per-process STIBP to AMD CPUs
From: Thomas Lendacky <Thomas.Lendacky@amd.com>

Extend the app-to-app Spectre v2 mitigation using STIBP to AMD CPUs.
AMD CPUs may update SSBD by a method other than the SPEC_CTRL MSR, so
take care of those special cases to avoid writing the SPEC_CTRL MSR
twice when updating SSBD and STIBP.
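
To illustrate the idea, here is a minimal user-space sketch (not part
of the patch) of the flag-diff technique used below: XOR-ing the
previous and next TIF words yields a mask of the flags that flipped,
and passing ~tif as the previous word makes every flag appear changed,
forcing a full update. The bit positions are assumptions for
illustration, not the kernel's actual values.

/*
 * Minimal sketch of the TIF flag-diff technique; bit positions are
 * illustrative assumptions, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIF_SSBD_MASK	(1UL << 5)	/* assumed bit positions */
#define TIF_STIBP_MASK	(1UL << 9)

static void update(unsigned long tifp, unsigned long tifn)
{
	/* XOR of previous and next flags marks the bits that changed */
	bool stibp = !!((tifp ^ tifn) & TIF_STIBP_MASK);
	bool ssbd = !!((tifp ^ tifn) & TIF_SSBD_MASK);

	printf("ssbd changed: %d, stibp changed: %d\n", ssbd, stibp);
}

int main(void)
{
	unsigned long prev = TIF_SSBD_MASK;			/* SSBD already set */
	unsigned long next = TIF_SSBD_MASK | TIF_STIBP_MASK;	/* STIBP newly set */

	update(prev, next);	/* only STIBP reports a change */
	update(~next, next);	/* forced path: both report changes */
	return 0;
}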

Originally-by: Thomas Lendacky <Thomas.Lendacky@amd.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
arch/x86/kernel/process.c | 48 +++++++++++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cb24014..4a3a672 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -399,6 +399,10 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
 {
 	u64 msr = x86_spec_ctrl_base;
 
+	/*
+	 * An AMD CPU may have used a different method to update SSBD, so
+	 * we need to be sure the SPEC_CTRL MSR value accounts for SSBD.
+	 */
 	if (static_cpu_has(X86_FEATURE_SSBD))
 		msr |= ssbd_tif_to_spec_ctrl(tifn);
 
@@ -408,20 +412,45 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
 	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static __always_inline void __speculative_store_bypass_update(unsigned long tifp,
+							      unsigned long tifn)
 {
-	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-		amd_set_ssb_virt_state(tifn);
-	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		amd_set_core_ssb_state(tifn);
-	else
-		set_spec_ctrl_state(tifn);
+	bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
+	bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);
+
+	if (!ssbd && !stibp)
+		return;
+
+	if (ssbd) {
+		/*
+		 * For AMD, try these methods first. The ssbd variable will
+		 * reflect whether the SPEC_CTRL MSR method is still needed.
+		 */
+		ssbd = false;
+
+		if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+			amd_set_ssb_virt_state(tifn);
+		else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+			amd_set_core_ssb_state(tifn);
+		else
+			ssbd = true;
+	}
+
+	/* Avoid a possible extra MSR write; recheck the flags */
+	if (!ssbd && !stibp)
+		return;
+
+	set_spec_ctrl_state(tifn);
 }
 
 void speculative_store_bypass_update(unsigned long tif)
 {
+	/*
+	 * On this path we're forcing the update, so use ~tif as the
+	 * previous flags.
+	 */
 	preempt_disable();
-	__speculative_store_bypass_update(tif);
+	__speculative_store_bypass_update(~tif, tif);
 	preempt_enable();
 }
 
@@ -457,8 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	if ((tifp ^ tifn) & _TIF_NOCPUID)
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-	if ((tifp ^ tifn) & (_TIF_SSBD | _TIF_STIBP))
-		__speculative_store_bypass_update(tifn);
+	__speculative_store_bypass_update(tifp, tifn);
 }
 
 /*
--
2.9.4
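
For reference, a user-space sketch of how a TIF flag bit is translated
into its SPEC_CTRL MSR bit, in the spirit of the kernel's
ssbd_tif_to_spec_ctrl() helper (which shifts the masked TIF word in a
single operation; this sketch uses an isolate-then-shift form for
clarity). The MSR bit positions are architectural (STIBP is bit 1,
SSBD is bit 2); the TIF bit numbers here are assumptions for
illustration.

/*
 * Sketch of mapping a TIF flag bit to its SPEC_CTRL MSR bit.
 * MSR bit positions are architectural; TIF bit numbers are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_STIBP_SHIFT	1	/* IA32_SPEC_CTRL bit 1 */
#define SPEC_CTRL_SSBD_SHIFT	2	/* IA32_SPEC_CTRL bit 2 */

#define TIF_STIBP	9	/* assumed task-flag bit numbers */
#define TIF_SSBD	5

static uint64_t tif_to_spec_ctrl(uint64_t tif, int tif_bit, int msr_bit)
{
	/* Isolate the TIF bit, then shift it into the MSR position. */
	return ((tif >> tif_bit) & 1ULL) << msr_bit;
}

int main(void)
{
	uint64_t tifn = (1ULL << TIF_SSBD) | (1ULL << TIF_STIBP);
	uint64_t msr = 0;	/* the kernel starts from x86_spec_ctrl_base */

	msr |= tif_to_spec_ctrl(tifn, TIF_SSBD, SPEC_CTRL_SSBD_SHIFT);
	msr |= tif_to_spec_ctrl(tifn, TIF_STIBP, SPEC_CTRL_STIBP_SHIFT);

	printf("SPEC_CTRL value: 0x%llx\n", (unsigned long long)msr);
	return 0;
}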