    Subject: Re: [PATCH RFC 2/4] KVM: MMU: Rename PT64_ROOT_LEVEL to PT64_ROOT_4LEVEL
    From: Paolo Bonzini <pbonzini@redhat.com>
    Date: 2017-03-09


    On 29/12/2016 10:26, Liang Li wrote:
    > Now that 64-bit long mode can use either a 4-level or a 5-level
    > page table, rename PT64_ROOT_LEVEL to PT64_ROOT_4LEVEL so that
    > PT64_ROOT_5LEVEL can be used for the 5-level case; this makes the
    > code clearer.
    >
    > Signed-off-by: Liang Li <liang.z.li@intel.com>
    > Cc: Thomas Gleixner <tglx@linutronix.de>
    > Cc: Ingo Molnar <mingo@redhat.com>
    > Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    > Cc: Dave Hansen <dave.hansen@linux.intel.com>
    > Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
    > Cc: Paolo Bonzini <pbonzini@redhat.com>
    > Cc: "Radim Krčmář" <rkrcmar@redhat.com>

    I think you should also #define PT64_ROOT_MAX_LEVEL to 4 in this
    patch, and use it wherever your final series replaces a 4 or
    PT64_ROOT_LEVEL with PT64_ROOT_5LEVEL.
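
    Concretely, this patch could add something like the following to
    arch/x86/kvm/mmu.h (a sketch; the exact placement is up to you):

    -#define PT64_ROOT_LEVEL 4
    +#define PT64_ROOT_4LEVEL 4
    +#define PT64_ROOT_MAX_LEVEL PT64_ROOT_4LEVEL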

    Then the next patch can do

    -#define PT64_ROOT_MAX_LEVEL PT64_ROOT_4LEVEL
    +#define PT64_ROOT_MAX_LEVEL PT64_ROOT_5LEVEL
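
    With that, fixed-size arrays only need to be sized once for the
    worst case. For example (a sketch, assuming the later patch defines
    PT64_ROOT_5LEVEL as 5), the mmu_page_path struct touched by this
    patch becomes:

            struct mmu_page_path {
                    struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
                    unsigned int idx[PT64_ROOT_MAX_LEVEL];
            };

    and the sptes[] array in walk_shadow_page_get_mmio_spte likewise
    becomes sptes[PT64_ROOT_MAX_LEVEL], so neither needs to change
    again when 5-level support is enabled.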

    Paolo


    > ---
    >  arch/x86/kvm/mmu.c       | 36 ++++++++++++++++++------------------
    >  arch/x86/kvm/mmu.h       |  2 +-
    >  arch/x86/kvm/mmu_audit.c |  4 ++--
    >  arch/x86/kvm/svm.c       |  2 +-
    >  4 files changed, 22 insertions(+), 22 deletions(-)
    >
    > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
    > index 7012de4..4c40273 100644
    > --- a/arch/x86/kvm/mmu.c
    > +++ b/arch/x86/kvm/mmu.c
    > @@ -1986,8 +1986,8 @@ static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
    >  }
    >
    >  struct mmu_page_path {
    > -        struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
    > -        unsigned int idx[PT64_ROOT_LEVEL];
    > +        struct kvm_mmu_page *parent[PT64_ROOT_4LEVEL];
    > +        unsigned int idx[PT64_ROOT_4LEVEL];
    >  };
    >
    >  #define for_each_sp(pvec, sp, parents, i) \
    > @@ -2193,8 +2193,8 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
    >          iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
    >          iterator->level = vcpu->arch.mmu.shadow_root_level;
    >
    > -        if (iterator->level == PT64_ROOT_LEVEL &&
    > -            vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
    > +        if (iterator->level == PT64_ROOT_4LEVEL &&
    > +            vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
    >              !vcpu->arch.mmu.direct_map)
    >                  --iterator->level;
    >
    > @@ -3061,8 +3061,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
    >          if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
    >                  return;
    >
    > -        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
    > -            (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
    > +        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL &&
    > +            (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL ||
    >               vcpu->arch.mmu.direct_map)) {
    >                  hpa_t root = vcpu->arch.mmu.root_hpa;
    >
    > @@ -3114,10 +3114,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
    >          struct kvm_mmu_page *sp;
    >          unsigned i;
    >
    > -        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
    > +        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
    >                  spin_lock(&vcpu->kvm->mmu_lock);
    >                  make_mmu_pages_available(vcpu);
    > -                sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
    > +                sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_4LEVEL, 1, ACC_ALL);
    >                  ++sp->root_count;
    >                  spin_unlock(&vcpu->kvm->mmu_lock);
    >                  vcpu->arch.mmu.root_hpa = __pa(sp->spt);
    > @@ -3158,14 +3158,14 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
    >           * Do we shadow a long mode page table? If so we need to
    >           * write-protect the guests page table root.
    >           */
    > -        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
    > +        if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
    >                  hpa_t root = vcpu->arch.mmu.root_hpa;
    >
    >                  MMU_WARN_ON(VALID_PAGE(root));
    >
    >                  spin_lock(&vcpu->kvm->mmu_lock);
    >                  make_mmu_pages_available(vcpu);
    > -                sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
    > +                sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_4LEVEL,
    >                                        0, ACC_ALL);
    >                  root = __pa(sp->spt);
    >                  ++sp->root_count;
    > @@ -3180,7 +3180,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
    >           * the shadow page table may be a PAE or a long mode page table.
    >           */
    >          pm_mask = PT_PRESENT_MASK;
    > -        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
    > +        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
    >                  pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
    >
    >          for (i = 0; i < 4; ++i) {
    > @@ -3213,7 +3213,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
    >           * If we shadow a 32 bit page table with a long mode page
    >           * table we enter this path.
    >           */
    > -        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
    > +        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
    >                  if (vcpu->arch.mmu.lm_root == NULL) {
    >                          /*
    >                           * The additional page necessary for this is only
    > @@ -3258,7 +3258,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
    >
    >          vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
    >          kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
    > -        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
    > +        if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
    >                  hpa_t root = vcpu->arch.mmu.root_hpa;
    >                  sp = page_header(root);
    >                  mmu_sync_children(vcpu, sp);
    > @@ -3334,7 +3334,7 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
    >  walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
    >  {
    >          struct kvm_shadow_walk_iterator iterator;
    > -        u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
    > +        u64 sptes[PT64_ROOT_4LEVEL], spte = 0ull;
    >          int root, leaf;
    >          bool reserved = false;
    >
    > @@ -3725,7 +3725,7 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
    >                  rsvd_check->rsvd_bits_mask[1][0] =
    >                          rsvd_check->rsvd_bits_mask[0][0];
    >                  break;
    > -        case PT64_ROOT_LEVEL:
    > +        case PT64_ROOT_4LEVEL:
    >                  rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
    >                          nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
    >                          rsvd_bits(maxphyaddr, 51);
    > @@ -4034,7 +4034,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
    >  static void paging64_init_context(struct kvm_vcpu *vcpu,
    >                                    struct kvm_mmu *context)
    >  {
    > -        paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
    > +        paging64_init_context_common(vcpu, context, PT64_ROOT_4LEVEL);
    >  }
    >
    >  static void paging32_init_context(struct kvm_vcpu *vcpu,
    > @@ -4088,7 +4088,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
    >                  context->root_level = 0;
    >          } else if (is_long_mode(vcpu)) {
    >                  context->nx = is_nx(vcpu);
    > -                context->root_level = PT64_ROOT_LEVEL;
    > +                context->root_level = PT64_ROOT_4LEVEL;
    >                  reset_rsvds_bits_mask(vcpu, context);
    >                  context->gva_to_gpa = paging64_gva_to_gpa;
    >          } else if (is_pae(vcpu)) {
    > @@ -4196,7 +4196,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
    >                  g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
    >          } else if (is_long_mode(vcpu)) {
    >                  g_context->nx = is_nx(vcpu);
    > -                g_context->root_level = PT64_ROOT_LEVEL;
    > +                g_context->root_level = PT64_ROOT_4LEVEL;
    >                  reset_rsvds_bits_mask(vcpu, g_context);
    >                  g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
    >          } else if (is_pae(vcpu)) {
    > diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
    > index ddc56e9..0d87de7 100644
    > --- a/arch/x86/kvm/mmu.h
    > +++ b/arch/x86/kvm/mmu.h
    > @@ -37,7 +37,7 @@
    >  #define PT32_DIR_PSE36_MASK \
    >          (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
    >
    > -#define PT64_ROOT_LEVEL 4
    > +#define PT64_ROOT_4LEVEL 4
    >  #define PT32_ROOT_LEVEL 2
    >  #define PT32E_ROOT_LEVEL 3
    >
    > diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
    > index dcce533..2e6996d 100644
    > --- a/arch/x86/kvm/mmu_audit.c
    > +++ b/arch/x86/kvm/mmu_audit.c
    > @@ -62,11 +62,11 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
    >          if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
    >                  return;
    >
    > -        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
    > +        if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
    >                  hpa_t root = vcpu->arch.mmu.root_hpa;
    >
    >                  sp = page_header(root);
    > -                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
    > +                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_4LEVEL);
    >                  return;
    >          }
    >
    > diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
    > index 08a4d3a..1acc6de 100644
    > --- a/arch/x86/kvm/svm.c
    > +++ b/arch/x86/kvm/svm.c
    > @@ -565,7 +565,7 @@ static inline void invlpga(unsigned long addr, u32 asid)
    >  static int get_npt_level(void)
    >  {
    >  #ifdef CONFIG_X86_64
    > -        return PT64_ROOT_LEVEL;
    > +        return PT64_ROOT_4LEVEL;
    >  #else
    >          return PT32E_ROOT_LEVEL;
    >  #endif
    >
