Date:    Mon, 3 Jan 2011
From:    Rik van Riel <riel@redhat.com>
Subject: [RFC -v3 PATCH 3/3] kvm: use yield_to instead of sleep in kvm_vcpu_on_spin
Instead of sleeping in kvm_vcpu_on_spin, which can cause gigantic
slowdowns of certain workloads, use yield_to to hand the rest of
our timeslice to another vcpu in the same KVM guest.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
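
Note for reviewers: this patch depends on the yield_to() primitive
introduced in patch 2/3 of this series. A minimal sketch of the
interface as it is used here follows; the exact prototype is an
assumption based on the proposed scheduler change and is not defined
by this patch.

        /*
         * Sketch only: the real definition lives in the scheduler patch
         * (2/3 of this series). yield_to() donates the caller's remaining
         * timeslice to @p and, when @preempt is nonzero, may preempt the
         * task currently running on p's CPU so that @p runs sooner.
         */
        int yield_to(struct task_struct *p, int preempt);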

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c011ba3..ad3cb4a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -185,6 +185,7 @@ struct kvm {
 #endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
+       int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 21f816c..5822246 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1878,18 +1878,44 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
-       ktime_t expires;
-       DEFINE_WAIT(wait);
-
-       prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
-       /* Sleep for 100 us, and hope lock-holder got scheduled */
-       expires = ktime_add_ns(ktime_get(), 100000UL);
-       schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+       struct kvm *kvm = me->kvm;
+       struct kvm_vcpu *vcpu;
+       int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+       int yielded = 0;
+       int pass;
+       int i;
 
-       finish_wait(&vcpu->wq, &wait);
+       /*
+        * We boost the priority of a VCPU that is runnable but not
+        * currently running, because it got preempted by something
+        * else and called schedule in __vcpu_run. Hopefully that
+        * VCPU is holding the lock that we need and will release it.
+        * We approximate round-robin by starting at the last boosted VCPU.
+        */
+       for (pass = 0; pass < 2 && !yielded; pass++) {
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       struct task_struct *task = vcpu->task;
+                       if (!pass && i < last_boosted_vcpu) {
+                               i = last_boosted_vcpu;
+                               continue;
+                       } else if (pass && i > last_boosted_vcpu)
+                               break;
+                       if (vcpu == me)
+                               continue;
+                       if (!task)
+                               continue;
+                       if (waitqueue_active(&vcpu->wq))
+                               continue;
+                       if (task->flags & PF_VCPU)
+                               continue;
+                       kvm->last_boosted_vcpu = i;
+                       yielded = 1;
+                       yield_to(task, 1);
+                       break;
+               }
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
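
For context, kvm_vcpu_on_spin() is reached from the arch pause/spinlock
exit handlers when the hardware reports a spinning guest. The existing
x86/VMX call site, unchanged by this patch and shown here only as a
rough illustration of when the yield happens, looks like this:

        /* Pause-loop exit: the guest VCPU is busy-waiting on a lock. */
        static int handle_pause(struct kvm_vcpu *vcpu)
        {
                skip_emulated_instruction(vcpu);
                kvm_vcpu_on_spin(vcpu);

                return 1;
        }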


