Subject: [RFC PATCH v1] KVM: x86: Introduce macros to simplify KVM_X86_OPS static calls
Introduce two new macros, KVM_X86_SC() and KVM_X86_SCC(), to simplify the
invocation of KVM_X86_OPS static calls. The current form is verbose, and
its two pairs of parentheses make argument alignment awkward, so callsites
easily exceed the "80 columns per single line of code" limit from the
coding-style document. The new macros improve code readability and
maintainability while keeping callsites within the coding-style
guidelines.
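
For example (illustrative, mirroring the lapic.c change below), a callsite
such as:

	static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
						    apic_find_highest_irr(apic));

becomes:

	KVM_X86_SCC(hwapic_irr_update,
		    apic->vcpu, apic_find_highest_irr(apic));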

Please note that this RFC only updates a few callsites for demonstration
purposes. If the approach looks good, all callsites will be converted in
the next version.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  3 +++
 arch/x86/kvm/lapic.c            | 15 ++++++++-------
 arch/x86/kvm/trace.h            |  3 ++-
 arch/x86/kvm/x86.c              |  8 ++++----
 4 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6efd1497b026..42f6450c10ec 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1856,6 +1856,9 @@ extern struct kvm_x86_ops kvm_x86_ops;
DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
+
+#define KVM_X86_SC(func, ...) static_call(kvm_x86_##func)(__VA_ARGS__)
+#define KVM_X86_SCC(func, ...) static_call_cond(kvm_x86_##func)(__VA_ARGS__)
#include <asm/kvm-x86-ops.h>

int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ebf41023be38..e819dbae8d79 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -728,8 +728,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
if (unlikely(apic->apicv_active)) {
/* need to update RVI */
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
- static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
- apic_find_highest_irr(apic));
+ KVM_X86_SCC(hwapic_irr_update,
+ apic->vcpu, apic_find_highest_irr(apic));
} else {
apic->irr_pending = false;
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -800,7 +800,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
* and must be left alone.
*/
if (unlikely(apic->apicv_active))
- static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+ KVM_X86_SCC(hwapic_isr_update, apic_find_highest_isr(apic));
else {
--apic->isr_count;
BUG_ON(apic->isr_count < 0);
@@ -2112,7 +2112,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
if (!ktimer->tscdeadline)
return false;

- if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
+ if (KVM_X86_SC(set_hv_timer, vcpu, ktimer->tscdeadline, &expired))
return false;

ktimer->hv_timer_in_use = true;
@@ -3041,9 +3041,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
kvm_apic_update_apicv(vcpu);
if (apic->apicv_active) {
- static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
- static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
- static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+ KVM_X86_SCC(apicv_post_state_restore, vcpu);
+ KVM_X86_SCC(hwapic_irr_update,
+ vcpu, apic_find_highest_irr(apic));
+ KVM_X86_SCC(hwapic_isr_update, apic_find_highest_isr(apic));
}
kvm_make_request(KVM_REQ_EVENT, vcpu);
if (ioapic_in_kernel(vcpu->kvm))
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index c6b4b1728006..a51f6c2d43f1 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -828,7 +828,8 @@ TRACE_EVENT(kvm_emulate_insn,
),

TP_fast_assign(
- __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
+ __entry->csbase = KVM_X86_SC(get_segment_base,
+ vcpu, VCPU_SREG_CS);
__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
- vcpu->arch.emulate_ctxt->fetch.data;
__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ebcc12d1e1de..146b88ded5d1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2008,7 +2008,7 @@ static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)

static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
{
- return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
+ return KVM_X86_SC(complete_emulated_msr, vcpu, vcpu->run->msr.error);
}

static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
@@ -7452,7 +7452,7 @@ static void kvm_init_msr_lists(void)
}

for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
- if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))
+ if (!KVM_X86_SC(has_emulated_msr, NULL, emulated_msrs_all[i]))
continue;

emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
@@ -10703,7 +10703,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
bitmap_or((ulong *)eoi_exit_bitmap,
vcpu->arch.ioapic_handled_vectors,
to_hv_synic(vcpu)->vec_bitmap, 256);
- static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+ KVM_X86_SCC(load_eoi_exitmap, vcpu, eoi_exit_bitmap);
return;
}
#endif
@@ -13497,7 +13497,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
* when the irq is masked/disabled or the consumer side (KVM
* int this case doesn't want to receive the interrupts.
*/
- ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
+ ret = KVM_X86_SC(pi_update_irte, irqfd->kvm, prod->irq, irqfd->gsi, 0);
if (ret)
printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
" fails: %d\n", irqfd->consumer.token, ret);
base-commit: 49ff3b4aec51e3abfc9369997cc603319b02af9a
--
2.27.0

