Subject: [192/197] KVM: fix the handling of dirty bitmaps to avoid overflows
2.6.32-stable review patch.  If anyone has any objections, please let us know.

------------------

From: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>

(Cherry-picked from commit 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d)

An int is not long enough to store the size of a dirty bitmap.

This patch fixes the problem by introducing a wrapper function that
calculates the size of a dirty bitmap.
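
To illustrate the truncation (a stand-alone user-space sketch, not part
of the patch; the slot size below is hypothetical, chosen so that the
byte count exceeds INT_MAX on a 64-bit host):

	#include <stdio.h>

	#define BITS_PER_LONG	64
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long npages = 1UL << 35;	/* hypothetical huge slot */
		int n_int = ALIGN(npages, BITS_PER_LONG) / 8;		/* truncates */
		unsigned long n_long = ALIGN(npages, BITS_PER_LONG) / 8;

		/* on common systems prints "int: 0, unsigned long: 4294967296" */
		printf("int: %d, unsigned long: %lu\n", n_int, n_long);
		return 0;
	}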

Note: in mark_page_dirty(), we have to account for the fact that
__set_bit() takes its offset as an int, not a long.
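
The split below mirrors what the patch does in mark_page_dirty(): the
high bits of rel_gfn advance the word pointer, and only the in-word
offset, always smaller than BITS_PER_LONG, is passed as an int (again a
user-space sketch with a hypothetical page number, and my_set_bit() as
a stand-in for the kernel's __set_bit()):

	#include <stdio.h>

	#define BITS_PER_LONG	64

	/* stand-in for the kernel's __set_bit(int nr, ...) */
	static void my_set_bit(int nr, unsigned long *addr)
	{
		addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
	}

	int main(void)
	{
		static unsigned long bitmap[8];
		unsigned long rel_gfn = 200;	/* hypothetical gfn offset in the slot */

		unsigned long *p = bitmap + rel_gfn / BITS_PER_LONG;
		int offset = rel_gfn % BITS_PER_LONG;	/* always < 64, fits in an int */

		my_set_bit(offset, p);
		printf("word %lu, bit %d\n", rel_gfn / BITS_PER_LONG, offset);
		return 0;
	}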

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
 arch/ia64/kvm/kvm-ia64.c |    9 +++++----
 arch/x86/kvm/x86.c       |    4 ++--
 include/linux/kvm_host.h |    5 +++++
 virt/kvm/kvm_main.c      |   13 ++++++++-----
 4 files changed, 20 insertions(+), 11 deletions(-)

--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1797,7 +1797,8 @@ static int kvm_ia64_sync_dirty_log(struc
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1810,7 +1811,7 @@ static int kvm_ia64_sync_dirty_log(struc
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1826,7 +1827,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -1844,7 +1845,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2133,7 +2133,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
@@ -2149,7 +2149,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 		spin_unlock(&kvm->mmu_lock);
 		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -116,6 +116,11 @@ struct kvm_memory_slot {
 	int user_alloc;
 };
 
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1226,7 +1226,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -1309,7 +1309,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -1321,7 +1321,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1663,10 +1663,13 @@ void mark_page_dirty(struct kvm *kvm, gf
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
-			set_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!test_bit(offset, p))
+			set_bit(offset, p);
 	}
 }
 



