Subject: Re: [PATCH 1/3] KVM: VMX: Always VMCLEAR in-use VMCSes during crash with kexec support
From: Paolo Bonzini
Date: 2020-02-28
On 27/02/20 23:30, Sean Christopherson wrote:
> -void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
> +void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs, bool in_use)
>  {
>  	vmcs_clear(loaded_vmcs->vmcs);
>  	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
>  		vmcs_clear(loaded_vmcs->shadow_vmcs);
> +
> +	if (in_use) {
> +		list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
> +
> +		/*
> +		 * Ensure deleting loaded_vmcs from its current percpu list
> +		 * completes before setting loaded_vmcs->vcpu to -1, otherwise
> +		 * a different cpu can see vcpu == -1 first and add loaded_vmcs
> +		 * to its percpu list before it's deleted from this cpu's list.
> +		 * Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
> +		 */
> +		smp_wmb();
> +	}
> +

I'd like to avoid the new in_use argument, and I also think it's a little
nicer to always invoke the memory barrier.  Even though vmclear is an
"asm volatile" and compiler reordering is therefore already taken care of,
in principle it's more correct to order the ->cpu write against vmclear's
writes as well.

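(For reference, the smp_wmb() pairs with the smp_rmb() on the reader side
in vmx_vcpu_load_vmcs().  Sketching that side roughly from memory rather
than quoting the exact code, the migration path does something like:

	if (!already_loaded) {
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();

		/*
		 * Read loaded_vmcs->cpu before adding loaded_vmcs to this
		 * cpu's percpu list; pairs with the smp_wmb() above.
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		local_irq_enable();
	}

so the writer has to make the list_del() visible before the ->cpu write,
otherwise the same loaded_vmcs could briefly sit on two percpu lists.)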
This gives the following patch on top:

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c9d6152e7a4d..77a64110577b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -656,25 +656,24 @@ static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr,
 	return ret;
 }
 
-void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs, bool in_use)
+void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 {
 	vmcs_clear(loaded_vmcs->vmcs);
 	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
 		vmcs_clear(loaded_vmcs->shadow_vmcs);
 
-	if (in_use) {
+	if (!list_empty(&loaded_vmcs->loaded_vmcss_on_cpu_link))
 		list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
 
-		/*
-		 * Ensure deleting loaded_vmcs from its current percpu list
-		 * completes before setting loaded_vmcs->vcpu to -1, otherwise
-		 * a different cpu can see vcpu == -1 first and add loaded_vmcs
-		 * to its percpu list before it's deleted from this cpu's list.
-		 * Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
-		 */
-		smp_wmb();
-	}
-
+	/*
+	 * Ensure all writes to loaded_vmcs, including deleting it
+	 * from its current percpu list, complete before setting
+	 * loaded_vmcs->cpu to -1; otherwise, a different cpu can
+	 * see vcpu == -1 first and add loaded_vmcs to its percpu
+	 * list before it's deleted from this cpu's list. Pairs
+	 * with the smp_rmb() in vmx_vcpu_load_vmcs().
+	 */
+	smp_wmb();
 	loaded_vmcs->cpu = -1;
 	loaded_vmcs->launched = 0;
 }
@@ -701,7 +700,7 @@ static void __loaded_vmcs_clear(void *arg)
 	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
 
-	loaded_vmcs_init(loaded_vmcs, true);
+	loaded_vmcs_init(loaded_vmcs);
 }
 
 void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
@@ -2568,7 +2567,8 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 
 	loaded_vmcs->shadow_vmcs = NULL;
 	loaded_vmcs->hv_timer_soft_disabled = false;
-	loaded_vmcs_init(loaded_vmcs, false);
+	INIT_LIST_HEAD(&loaded_vmcs->loaded_vmcss_on_cpu_link);
+	loaded_vmcs_init(loaded_vmcs);
 
 	if (cpu_has_vmx_msr_bitmap()) {
 		loaded_vmcs->msr_bitmap = (unsigned long *)
Paolo
