Subject: [PATCH 09/10] KVM: x86: Kill KVM_REQ_GLOBAL_CLOCK_UPDATE
From: David Woodhouse <dwmw@amazon.co.uk>

This was introduced in commit 0061d53daf26 ("KVM: x86: limit difference
between kvmclock updates") to reduce cross-vCPU differences which arose
because the KVM clock was based on CLOCK_MONOTONIC and thus subject to
NTP frequency corrections.

However, commit 53fafdbb8b21 ("KVM: x86: switch KVMCLOCK base to
monotonic raw clock") switched to using CLOCK_MONOTONIC_RAW as the basis
for the KVM clock, avoiding the NTP frequency skew altogether.

So remove KVM_REQ_GLOBAL_CLOCK_UPDATE. In kvm_write_system_time(), all
that's needed is a single KVM_REQ_CLOCK_UPDATE for the vCPU whose KVM
clock is being configured.
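
For clarity, the enable path then boils down to a single per-vCPU clock
update. A condensed sketch of the resulting kvm_write_system_time()
logic (mirroring the hunk below, with the surrounding context and MSR
plumbing omitted):

	vcpu->arch.time = system_time;

	/* we verify if the enable bit is set... */
	if (system_time & 1) {
		kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
				 sizeof(struct pvclock_vcpu_time_info));
		/* one local update suffices; no cross-vCPU fan-out */
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	} else {
		kvm_gpc_deactivate(&vcpu->arch.pv_time);
	}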

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/x86.c | 57 ++-------------------------------
2 files changed, 4 insertions(+), 55 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8440c4081727..cfac72b4aa64 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -98,7 +98,7 @@
KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16)
+/* KVM_ARCH_REQ(16) is available to be reused */
#define KVM_REQ_APIC_PAGE_RELOAD \
KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4ec4eb850c5b..f870e29d2558 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2354,13 +2354,13 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
}

vcpu->arch.time = system_time;
- kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);

/* we verify if the enable bit is set... */
- if (system_time & 1)
+ if (system_time & 1) {
kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
sizeof(struct pvclock_vcpu_time_info));
- else
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ } else
kvm_gpc_deactivate(&vcpu->arch.pv_time);

return;
@@ -3391,46 +3391,6 @@ uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
return ktime_get_real_ns() - get_kvmclock_ns(kvm);
}

-/*
- * kvmclock updates which are isolated to a given vcpu, such as
- * vcpu->cpu migration, should not allow system_timestamp from
- * the rest of the vcpus to remain static. Otherwise ntp frequency
- * correction applies to one vcpu's system_timestamp but not
- * the others.
- *
- * So in those cases, request a kvmclock update for all vcpus.
- * We need to rate-limit these requests though, as they can
- * considerably slow guests that have a large number of vcpus.
- * The time for a remote vcpu to update its kvmclock is bound
- * by the delay we use to rate-limit the updates.
- */
-
-#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
-
-static void kvmclock_update_fn(struct work_struct *work)
-{
- unsigned long i;
- struct delayed_work *dwork = to_delayed_work(work);
- struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
- kvmclock_update_work);
- struct kvm *kvm = container_of(ka, struct kvm, arch);
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
- kvm_vcpu_kick(vcpu);
- }
-}
-
-static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
-{
- struct kvm *kvm = v->kvm;
-
- kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
- schedule_delayed_work(&kvm->arch.kvmclock_update_work,
- KVMCLOCK_UPDATE_DELAY);
-}
-
/* These helpers are safe iff @msr is known to be an MCx bank MSR. */
static bool is_mci_control_msr(u32 msr)
{
@@ -5018,12 +4978,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (kvm_lapic_hv_timer_in_use(vcpu))
kvm_lapic_restart_hv_timer(vcpu);

- /*
- * On a host with synchronized TSC, there is no need to update
- * kvmclock on vcpu->cpu migration
- */
- if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
- kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
vcpu->cpu = cpu;
@@ -10961,8 +10915,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
__kvm_migrate_timers(vcpu);
if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
kvm_update_masterclock(vcpu->kvm);
- if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
- kvm_gen_kvmclock_update(vcpu);
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
r = kvm_guest_time_update(vcpu);
if (unlikely(r))
@@ -12780,8 +12732,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.hv_root_tdp = INVALID_PAGE;
#endif

- INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
-
kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
kvm_xen_init_vm(kvm);
@@ -12820,7 +12770,6 @@ static void kvm_unload_vcpu_mmus(struct kvm *kvm)

void kvm_arch_sync_events(struct kvm *kvm)
{
- cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
kvm_free_pit(kvm);
}

--
2.44.0
