Subject: [Resend PATCH V5 4/10] KVM/VMX: Add hv tlb range flush support
Date: Thu, 6 Dec 2018
From: Lan Tianyu <Tianyu.Lan@microsoft.com>

This patch registers the tlb_remote_flush_with_range callback with the
Hyper-V TLB range flush interface.
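
For illustration only (not part of this patch): a minimal sketch of how a
caller on the KVM MMU side might use the new hook, assuming the
struct kvm_tlb_range and the kvm_x86_ops->tlb_remote_flush_with_range member
introduced earlier in this series; the helper name and fallback policy below
are hypothetical, the actual MMU wiring is added elsewhere in the series.

/*
 * Hypothetical caller: request a ranged remote TLB flush for 'pages'
 * pages starting at 'start_gfn', and fall back to a full remote TLB
 * flush if the ranged hook is absent or returns an error.
 */
static void example_flush_gfn_range(struct kvm *kvm, u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range = {
		.start_gfn = start_gfn,
		.pages = pages,
	};
	int ret = -ENOTSUPP;

	if (kvm_x86_ops->tlb_remote_flush_with_range)
		ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, &range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}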

Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
Change since v4:
	- Use the new function kvm_fill_hv_flush_list_func() to fill the
	  flush request.
Change since v3:
	- Merge Vitaly's fix that avoids passing EPT configuration info to
	  vmx_hv_remote_flush_tlb() (see the illustrative note following
	  this changelog).
Change since v1:
	- Pass the flush range via the new Hyper-V TLB flush struct rather
	  than the KVM TLB flush struct.
---
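
Illustrative note (not part of the patch), expanding on the v3 change above:
per the Intel SDM EPTP format, the low 12 bits of the EPT pointer hold
configuration (memory type, page-walk length, A/D-bit enable) while the EPT
PML4 table address is 4KB aligned, so the helpers in the diff below mask with
PAGE_MASK before issuing the FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall.
A minimal sketch; the local variable names here are only for illustration:

	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;	/* PML4 base + EPT config bits */
	u64 pml4_base = ept_pointer & PAGE_MASK;	/* address only, as the hypercall expects */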
 arch/x86/kvm/vmx.c | 63 +++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2356118ea440..bad2aa6c5ca1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1569,7 +1569,34 @@ static void check_ept_pointer_match(struct kvm *kvm)
 	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+		void *data)
+{
+	struct kvm_tlb_range *range = data;
+
+	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+			range->pages);
+}
+
+static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
+		struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+{
+	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
+
+	/*
+	 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
+	 * of the base of EPT PML4 table, strip off EPT configuration
+	 * information.
+	 */
+	if (range)
+		return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
+				kvm_fill_hv_flush_list_func, (void *)range);
+	else
+		return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
+}
+
+static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+		struct kvm_tlb_range *range)
 {
 	struct kvm_vcpu *vcpu;
 	int ret = -ENOTSUPP, i;
@@ -1579,29 +1606,26 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
 	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
 		check_ept_pointer_match(kvm);
 
-	/*
-	 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
-	 * base of EPT PML4 table, strip off EPT configuration information.
-	 * If ept_pointer is invalid pointer, bypass the flush request.
-	 */
 	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
-
-			if (!VALID_PAGE(ept_pointer))
-				continue;
-
-			ret |= hyperv_flush_guest_mapping(
-				ept_pointer & PAGE_MASK);
+			/* If ept_pointer is invalid pointer, bypass flush request. */
+			if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
+				ret |= __hv_remote_flush_tlb_with_range(
+					kvm, vcpu, range);
 		}
 	} else {
-		ret = hyperv_flush_guest_mapping(
-			to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
+		ret = __hv_remote_flush_tlb_with_range(kvm,
+				kvm_get_vcpu(kvm, 0), range);
 	}
 
 	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
 	return ret;
 }
+
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+	return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
 #else /* !IS_ENABLED(CONFIG_HYPERV) */
 static inline void evmcs_write64(unsigned long field, u64 value) {}
 static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -7971,8 +7995,11 @@ static __init int hardware_setup(void)
 
 #if IS_ENABLED(CONFIG_HYPERV)
 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
-	    && enable_ept)
-		kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+	    && enable_ept) {
+		kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
+		kvm_x86_ops->tlb_remote_flush_with_range =
+			hv_remote_flush_tlb_with_range;
+	}
 #endif
 
 	if (!cpu_has_vmx_ple()) {
@@ -11612,6 +11639,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	vmx->nested.posted_intr_nv = -1;
 	vmx->nested.current_vmptr = -1ull;
 
+	vmx->ept_pointer = INVALID_PAGE;
+
 	vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
 
 	/*
-- 
2.14.4