From: Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH] KVM: hyperv: split lock to protect struct kvm_hv
Date: Sat, 10 Dec 2016
Introduce a dedicated mutex, kvm->arch.hyperv.hv_lock, to protect the
partition-wide fields of struct kvm_hv, instead of taking kvm->lock in
the Hyper-V MSR handlers, which run with vcpu->mutex already held.
Otherwise, there is an AB-BA deadlock between kvm->lock and
vcpu->mutex.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
Compile-tested only.
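
Note for reviewers: the inversion is the classic AB-BA pattern. Below is a
minimal standalone sketch (userspace, pthreads) that reproduces it; the lock
names are borrowed from KVM for clarity, and vcpu_ioctl()/vm_ioctl() are
illustrative stand-ins, not the actual call chains.

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t kvm_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vcpu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mimics a vcpu ioctl: runs with vcpu->mutex held, then (old code)
 * takes kvm->lock for a partition-wide Hyper-V MSR. */
static void *vcpu_ioctl(void *arg)
{
	pthread_mutex_lock(&vcpu_mutex);
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&kvm_lock);	/* blocks: vm_ioctl() holds it */
	pthread_mutex_unlock(&kvm_lock);
	pthread_mutex_unlock(&vcpu_mutex);
	return NULL;
}

/* Mimics a vm ioctl that takes kvm->lock and then a vcpu's mutex. */
static void *vm_ioctl(void *arg)
{
	pthread_mutex_lock(&kvm_lock);
	sleep(1);
	pthread_mutex_lock(&vcpu_mutex);	/* blocks: vcpu_ioctl() holds it */
	pthread_mutex_unlock(&vcpu_mutex);
	pthread_mutex_unlock(&kvm_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, vcpu_ioctl, NULL);
	pthread_create(&t2, NULL, vm_ioctl, NULL);
	pthread_join(t1, NULL);		/* never returns: AB-BA deadlock */
	pthread_join(t2, NULL);
	return 0;
}

With the patch, the vcpu-ioctl side takes hv_lock instead of kvm->lock, so
kvm->lock drops out of the cycle and vcpu->mutex outside hv_lock becomes the
documented order. The READ_ONCE() in kvm_hv_hypercall_enabled() is the flip
side of the same change: that path still reads hv_hypercall without taking
hv_lock, so the lockless read is made explicit.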

 Documentation/virtual/kvm/locking.txt |  2 ++
 arch/x86/include/asm/kvm_host.h       |  1 +
 arch/x86/kvm/hyperv.c                 | 10 +++++-----
 arch/x86/kvm/x86.c                    |  1 +
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index e5dd9f4d6100..5dd06289ce59 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -16,6 +16,8 @@ The acquisition orders for mutexes are as follows:
 
 For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
 else is a leaf: no other lock is taken inside the critical sections.
 
+In particular, on x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
 2: Exception
 ------------

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7892530cbacf..2e25038dbd93 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -704,6 +704,7 @@ struct kvm_apic_map {
 
 /* Hyper-V emulation context */
 struct kvm_hv {
+	struct mutex hv_lock;
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
 	u64 hv_tsc_page;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 99cde5220e07..021abafabc12 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1142,9 +1142,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;
 
-		mutex_lock(&vcpu->kvm->lock);
+		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
 		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
 		return r;
 	} else
 		return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1155,9 +1155,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	if (kvm_hv_msr_partition_wide(msr)) {
 		int r;
 
-		mutex_lock(&vcpu->kvm->lock);
+		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
 		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
-		mutex_unlock(&vcpu->kvm->lock);
+		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
 		return r;
 	} else
 		return kvm_hv_get_msr(vcpu, msr, pdata);
@@ -1165,7 +1165,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 
 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f0aee98e7492..30fc403df802 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7861,6 +7861,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
 	mutex_init(&kvm->arch.apic_map_lock);
+	mutex_init(&kvm->arch.hyperv.hv_lock);
 	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
 	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
--
2.9.3