From: Marcelo Tosatti <mtosatti@redhat.com>
Subject: [PATCH 26/45] KVM: MMU: protect kvm_mmu_change_mmu_pages with mmu_lock
Date: 24 May 2009

kvm_handle_hva, called by the MMU notifiers, manipulates MMU data only
under the protection of mmu_lock.

Update the callers of kvm_mmu_change_mmu_pages to take mmu_lock as well,
thus protecting against concurrent kvm_handle_hva.
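
[For illustration only, not part of the patch: a minimal sketch of the
locking rule this change enforces, using a hypothetical helper name
(example_resize_mmu). Every path that resizes the shadow page pool must
hold kvm->mmu_lock, the same lock kvm_handle_hva takes on the MMU
notifier side.]

        /*
         * Illustration (hypothetical helper, assumes the usual KVM
         * headers): resizing the shadow MMU page pool while holding
         * mmu_lock serializes against a concurrent kvm_handle_hva().
         */
        static void example_resize_mmu(struct kvm *kvm, unsigned int nr_pages)
        {
                spin_lock(&kvm->mmu_lock);
                kvm_mmu_change_mmu_pages(kvm, nr_pages);
                spin_unlock(&kvm->mmu_lock);
        }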

    Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
    Signed-off-by: Avi Kivity <avi@redhat.com>
    ---
    arch/x86/kvm/mmu.c | 2 --
    arch/x86/kvm/x86.c | 6 ++++++
    2 files changed, 6 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
    index 8fcdae9..80c76f4 100644
    --- a/arch/x86/kvm/mmu.c
    +++ b/arch/x86/kvm/mmu.c
@@ -2723,7 +2723,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
         struct kvm_mmu_page *sp;
 
-        spin_lock(&kvm->mmu_lock);
         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                 int i;
                 u64 *pt;
@@ -2738,7 +2737,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                 pt[i] &= ~PT_WRITABLE_MASK;
         }
         kvm_flush_remote_tlbs(kvm);
-        spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
    diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
    index cf97ea0..ecc35c6 100644
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
@@ -1621,10 +1621,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                 return -EINVAL;
 
         down_write(&kvm->slots_lock);
+        spin_lock(&kvm->mmu_lock);
 
         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+        spin_unlock(&kvm->mmu_lock);
         up_write(&kvm->slots_lock);
         return 0;
 }
@@ -1800,7 +1802,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
         /* If nothing is dirty, don't bother messing with page tables. */
         if (is_dirty) {
+                spin_lock(&kvm->mmu_lock);
                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
+                spin_unlock(&kvm->mmu_lock);
                 kvm_flush_remote_tlbs(kvm);
                 memslot = &kvm->memslots[log->slot];
                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4544,12 +4548,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                 }
         }
 
+        spin_lock(&kvm->mmu_lock);
         if (!kvm->arch.n_requested_mmu_pages) {
                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
         }
 
         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+        spin_unlock(&kvm->mmu_lock);
         kvm_flush_remote_tlbs(kvm);
 
         return 0;
    --
    1.6.0.6

