Subject: [RFC -v5 PATCH 4/4] kvm: use yield_to instead of sleep in kvm_vcpu_on_spin

Instead of sleeping in kvm_vcpu_on_spin, which can cause gigantic
slowdowns of certain workloads, use yield_to to hand the rest of
our timeslice to another vcpu in the same KVM guest.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
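
[Annotation, not part of the patch.] Background for readers who have not followed the
earlier patches in this series: the new path resolves each candidate VCPU's task from
its pid, skips VCPUs that are halted or already running guest code, and otherwise
donates the remainder of the current timeslice with yield_to() (introduced earlier in
the series). A condensed, purely illustrative sketch of that per-candidate step, using
only helpers that also appear in the patch below; the try_yield_to_vcpu() name is
hypothetical:

/*
 * Illustrative only: a condensed sketch of the per-candidate step in
 * the loop added by the patch below.  try_yield_to_vcpu() is a
 * hypothetical helper, not something this patch introduces.
 */
#include <linux/kvm_host.h>
#include <linux/sched.h>

static int try_yield_to_vcpu(struct kvm_vcpu *vcpu)
{
	struct task_struct *task;
	int yielded = 0;

	if (!vcpu->pid)				/* VCPU thread has not run yet */
		return 0;
	if (waitqueue_active(&vcpu->wq))	/* VCPU is halted, not preempted */
		return 0;

	task = get_pid_task(vcpu->pid, PIDTYPE_PID);
	if (!task)
		return 0;
	if (!(task->flags & PF_VCPU))		/* skip VCPUs already running guest code */
		yielded = yield_to(task, 1);	/* donate the rest of our timeslice */
	put_task_struct(task);

	return yielded;
}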

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d56ed5..fab2250 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -187,6 +187,7 @@ struct kvm {
 #endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
+	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 65e997a..a7c45c8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1292,18 +1292,52 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
-	ktime_t expires;
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
-	/* Sleep for 100 us, and hope lock-holder got scheduled */
-	expires = ktime_add_ns(ktime_get(), 100000UL);
-	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+	struct kvm *kvm = me->kvm;
+	struct kvm_vcpu *vcpu;
+	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+	int yielded = 0;
+	int pass;
+	int i;
 
-	finish_wait(&vcpu->wq, &wait);
+	/*
+	 * We boost the priority of a VCPU that is runnable but not
+	 * currently running, because it got preempted by something
+	 * else and called schedule in __vcpu_run.  Hopefully that
+	 * VCPU is holding the lock that we need and will release it.
+	 * We approximate round-robin by starting at the last boosted VCPU.
+	 */
+	for (pass = 0; pass < 2 && !yielded; pass++) {
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			struct task_struct *task;
+			if (!pass && i < last_boosted_vcpu) {
+				i = last_boosted_vcpu;
+				continue;
+			} else if (pass && i > last_boosted_vcpu)
+				break;
+			if (vcpu == me)
+				continue;
+			if (!vcpu->pid)
+				continue;
+			if (waitqueue_active(&vcpu->wq))
+				continue;
+			task = get_pid_task(vcpu->pid, PIDTYPE_PID);
+			if (!task)
+				continue;
+			if (task->flags & PF_VCPU) {
+				put_task_struct(task);
+				continue;
+			}
+			if (yield_to(task, 1)) {
+				put_task_struct(task);
+				kvm->last_boosted_vcpu = i;
+				yielded = 1;
+				break;
+			}
+			put_task_struct(task);
+		}
+	}
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);


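[Annotation, not part of the patch.] On the scan order of the double loop above: pass 0
covers the VCPUs after the one boosted last time, and pass 1 wraps around to cover the
remainder, so the loop considers the other VCPUs in approximately round-robin order
(the real code also stops both passes as soon as one yield_to() succeeds). A standalone
userspace sketch of just the index logic; NR_VCPUS and the sample arguments are made up:

/* Standalone illustration (not kernel code) of the two-pass scan above. */
#include <stdio.h>

#define NR_VCPUS 6	/* made-up number of VCPUs */

static void scan(int last_boosted, int me)
{
	int pass, i;

	printf("last_boosted=%d me=%d:", last_boosted, me);
	for (pass = 0; pass < 2; pass++) {
		for (i = 0; i < NR_VCPUS; i++) {
			if (!pass && i < last_boosted) {
				/* continue hits i++, so pass 0 starts at last_boosted + 1 */
				i = last_boosted;
				continue;
			} else if (pass && i > last_boosted)
				break;		/* pass 1 stops once it reaches the wrap point */
			if (i == me)
				continue;	/* never yield to ourselves */
			printf(" %d", i);	/* order in which candidates are considered */
		}
	}
	printf("\n");
}

int main(void)
{
	scan(3, 0);	/* prints: 4 5 1 2 3 */
	scan(5, 2);	/* prints: 0 1 3 4 5 */
	return 0;
}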