    From: Isaku Yamahata <isaku.yamahata@intel.com>
    Subject: [PATCH v11 040/113] KVM: x86/mmu: Add a new is_private member for union kvm_mmu_page_role

    Because TDX support introduces private mappings, add a new is_private member
    to union kvm_mmu_page_role, along with accessor functions to check it.

    Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
    ---
    arch/x86/include/asm/kvm_host.h | 27 +++++++++++++++++++++++++++
    arch/x86/kvm/mmu/mmu_internal.h |  5 +++++
    arch/x86/kvm/mmu/spte.h         |  6 ++++++
    3 files changed, 38 insertions(+)

    diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
    index f5b51bdef0c6..1bcd118eef31 100644
    --- a/arch/x86/include/asm/kvm_host.h
    +++ b/arch/x86/include/asm/kvm_host.h
    @@ -342,7 +342,12 @@ union kvm_mmu_page_role {
    		unsigned ad_disabled:1;
    		unsigned guest_mode:1;
    		unsigned passthrough:1;
    +#ifdef CONFIG_KVM_MMU_PRIVATE
    +		unsigned is_private:1;
    +		unsigned :4;
    +#else
    		unsigned :5;
    +#endif

    		/*
    		 * This is left at the top of the word so that
    @@ -354,6 +359,28 @@ union kvm_mmu_page_role {
    	};
    };

    +#ifdef CONFIG_KVM_MMU_PRIVATE
    +static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
    +{
    +	return !!role.is_private;
    +}
    +
    +static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
    +{
    +	role->is_private = 1;
    +}
    +#else
    +static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
    +{
    +	return false;
    +}
    +
    +static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
    +{
    +	WARN_ON_ONCE(1);
    +}
    +#endif
    +
    /*
     * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
     * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
    diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
    index 6767bc9b7c5c..a20b54060bc8 100644
    --- a/arch/x86/kvm/mmu/mmu_internal.h
    +++ b/arch/x86/kvm/mmu/mmu_internal.h
    @@ -143,6 +143,11 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
    	return kvm_mmu_role_as_id(sp->role);
    }

    +static inline bool is_private_sp(const struct kvm_mmu_page *sp)
    +{
    +	return kvm_mmu_page_role_is_private(sp->role);
    +}
    +
    static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
    {
    	/*
    diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
    index 256395eb593f..7046671b08cb 100644
    --- a/arch/x86/kvm/mmu/spte.h
    +++ b/arch/x86/kvm/mmu/spte.h
    @@ -251,6 +251,12 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
    	return to_shadow_page(__pa(sptep));
    }

    +static inline bool is_private_sptep(u64 *sptep)
    +{
    +	WARN_ON_ONCE(!sptep);
    +	return is_private_sp(sptep_to_sp(sptep));
    +}
    +
    static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
    {
    	return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
    --
    2.25.1
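
A side note on the layout change: the new is_private bit takes one bit from the old
5-bit anonymous pad (":5" becomes "is_private:1" plus ":4"), so the role bitfields
still pack into the same 32-bit word whether or not CONFIG_KVM_MMU_PRIVATE is set.
Below is a minimal user-space sketch, not kernel code: the union and its fields are
simplified stand-ins for kvm_mmu_page_role, and DEMO_PRIVATE stands in for
CONFIG_KVM_MMU_PRIVATE, purely to demonstrate that the word size is unchanged.

/* demo.c: build with "gcc -o demo demo.c" or "gcc -DDEMO_PRIVATE -o demo demo.c". */
#include <assert.h>
#include <stdio.h>

union demo_page_role {
	unsigned int word;
	struct {
		unsigned level:4;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned passthrough:1;
#ifdef DEMO_PRIVATE
		unsigned is_private:1;	/* the new bit */
		unsigned :4;		/* pad shrinks from 5 to 4 bits */
#else
		unsigned :5;
#endif
		unsigned smm:8;		/* kept at the top of the word */
	};
};

int main(void)
{
	union demo_page_role role = { .word = 0 };

	/* Same size either way: the bitfields still fit in one 32-bit word. */
	assert(sizeof(role) == sizeof(unsigned int));
#ifdef DEMO_PRIVATE
	role.is_private = 1;
#endif
	printf("sizeof(role) = %zu bytes, word = %#x\n",
	       sizeof(role), role.word);
	return 0;
}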