From: Zachary Amsden <zamsden@redhat.com>
Subject: [PATCH 03/18] TSC reset compensation
Date: 12 Jul 2010
Attempt to synchronize TSCs which are reset to the same value.  In the
case of a reliable hardware TSC, we can just re-use the same offset, but
on non-reliable hardware, we can get closer by adjusting the offset to
match the elapsed time.

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
---
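Note: the unstable-TSC path below converts elapsed wall-clock time into
TSC ticks.  As a quick sanity check of that arithmetic, here is a minimal
userspace sketch; it is not part of the patch, and the function name and
sample frequency are illustrative only (NSEC_PER_SEC/USEC_PER_SEC are
redefined here since the kernel headers are unavailable in userspace):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define USEC_PER_SEC 1000000ULL

/*
 * tsc_khz is ticks per millisecond, so:
 *   ticks = ns * (tsc_khz * 1000) / NSEC_PER_SEC
 *         = ns * tsc_khz / USEC_PER_SEC
 * which is the tsc_delta computation the patch performs.
 */
static uint64_t elapsed_ns_to_tsc_ticks(uint64_t ns, uint64_t tsc_khz)
{
        return ns * tsc_khz / USEC_PER_SEC;
}

int main(void)
{
        /* Example: 2.5 s elapsed on a 2800000 kHz (2.8 GHz) TSC. */
        uint64_t ticks = elapsed_ns_to_tsc_ticks(5ULL * NSEC_PER_SEC / 2,
                                                 2800000ULL);
        printf("tsc_delta = %llu ticks\n", (unsigned long long)ticks);
        return 0;
}

At 2800000 kHz, 2.5 s of elapsed time yields 7000000000 ticks, i.e.
exactly 2.5 seconds' worth of TSC advance, as expected.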
 arch/x86/include/asm/kvm_host.h |    3 +++
 arch/x86/kvm/x86.c              |   31 ++++++++++++++++++++++++++++++-
 2 files changed, 33 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3b4efe2..4b42893 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -396,6 +396,9 @@ struct kvm_arch {
         unsigned long irq_sources_bitmap;
         s64 kvmclock_offset;
         spinlock_t tsc_write_lock;
+        u64 last_tsc_nsec;
+        u64 last_tsc_offset;
+        u64 last_tsc_write;
 
         struct kvm_xen_hvm_config xen_hvm_config;

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2ee975..bb7451b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -896,10 +896,39 @@ static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
         struct kvm *kvm = vcpu->kvm;
-        u64 offset;
+        u64 offset, ns, elapsed;
+        struct timespec ts;
 
         spin_lock(&kvm->arch.tsc_write_lock);
         offset = data - native_read_tsc();
+        ktime_get_ts(&ts);
+        monotonic_to_bootbased(&ts);
+        ns = timespec_to_ns(&ts);
+        elapsed = ns - kvm->arch.last_tsc_nsec;
+
+        /*
+         * Special case: an identical write to the TSC within 5 seconds of
+         * another CPU is interpreted as an attempt to synchronize
+         * (the 5 seconds is to accommodate host load / swapping).
+         *
+         * In that case, for a reliable TSC, we can match TSC offsets,
+         * or make a best guess using the kernel_ns value.
+         */
+        if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
+                if (!check_tsc_unstable()) {
+                        offset = kvm->arch.last_tsc_offset;
+                        pr_debug("kvm: matched tsc offset for %llu\n", data);
+                } else {
+                        u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
+                        tsc_delta = tsc_delta / USEC_PER_SEC;
+                        offset += tsc_delta;
+                        pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
+                }
+                ns = kvm->arch.last_tsc_nsec;
+        }
+        kvm->arch.last_tsc_nsec = ns;
+        kvm->arch.last_tsc_write = data;
+        kvm->arch.last_tsc_offset = offset;
         kvm_x86_ops->write_tsc_offset(vcpu, offset);
         spin_unlock(&kvm->arch.tsc_write_lock);

--
1.7.1
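
The comment in the x86.c hunk treats an identical TSC write from another
vCPU within 5 seconds as a synchronization attempt.  The following
self-contained sketch mimics that bookkeeping under assumed names
(struct tsc_state, write_tsc, and the host_tsc/now_ns/tsc_stable
parameters stand in for kernel state such as native_read_tsc() and
check_tsc_unstable(), and are not from the patch):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NSEC_PER_SEC 1000000000ULL
#define USEC_PER_SEC 1000000ULL

/* Mirrors the three fields the patch adds to struct kvm_arch. */
struct tsc_state {
        uint64_t last_tsc_nsec;
        uint64_t last_tsc_write;
        uint64_t last_tsc_offset;
};

/*
 * Simplified model of guest_write_tsc(): a repeated write of the same
 * value within 5 seconds reuses the recorded offset on a stable TSC,
 * or bumps the offset by the elapsed time converted to ticks otherwise.
 */
static uint64_t write_tsc(struct tsc_state *s, uint64_t data,
                          uint64_t host_tsc, uint64_t now_ns,
                          uint64_t tsc_khz, bool tsc_stable)
{
        uint64_t offset = data - host_tsc;
        uint64_t elapsed = now_ns - s->last_tsc_nsec;
        uint64_t ns = now_ns;

        if (data == s->last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
                if (tsc_stable)
                        offset = s->last_tsc_offset;            /* matched */
                else
                        offset += elapsed * tsc_khz / USEC_PER_SEC; /* adjusted */
                ns = s->last_tsc_nsec;
        }
        s->last_tsc_nsec = ns;
        s->last_tsc_write = data;
        s->last_tsc_offset = offset;
        return offset;
}

int main(void)
{
        struct tsc_state s = { 0 };

        /* vcpu0 writes TSC = 0 at t = 10 s; vcpu1 repeats it 1 ms later. */
        uint64_t o0 = write_tsc(&s, 0, 28000000000ULL,
                                10ULL * NSEC_PER_SEC, 2800000ULL, true);
        uint64_t o1 = write_tsc(&s, 0, 28002800000ULL,
                                10ULL * NSEC_PER_SEC + 1000000ULL,
                                2800000ULL, true);
        printf("offsets %smatched\n", o0 == o1 ? "" : "not ");
        return 0;
}

With a stable TSC, the second write lands inside the 5-second window and
gets the first vCPU's offset back, so both vCPUs observe an identically
offset guest TSC.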

