Subject: Re: [PATCH v3 0/3] SVM 5-level page table support
On Mon, Aug 23, 2021, Wei Huang wrote:
> On 08/23 12:20, Maxim Levitsky wrote:
> > This hack makes it work again for me (I don't yet use TDP mmu).
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index caa3f9aee7d1..c25e0d40a620 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -3562,7 +3562,7 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
> >  	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
> >  		return 0;
> > 
> > -	if (mmu->pae_root && mmu->pml4_root && mmu->pml5_root)

Maxim, I assume you hit this WARN and bail?

	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
			 mmu->pml5_root))
		return -EIO;

Because as the comment states, KVM expects all the special roots to be allocated
together. The 5-level paging support breaks that assumption because pml5_root
will be allocated iff the host is using 5-level paging.

	if (mmu->shadow_root_level > PT64_ROOT_4LEVEL) {
		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!pml5_root)
			goto err_pml5;
	}
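
For reference, here's a standalone userspace sketch of the failure (illustrative
only; the struct and function names just loosely mirror kvm_mmu, and -5 stands in
for -EIO): on a 4-level host pml5_root legitimately stays NULL, so on a second
call the "everything is allocated" fast path never matches, the partially
populated roots trip the sanity check, and KVM bails with -EIO.

#include <stdio.h>

/* Stand-in for the three special roots in struct kvm_mmu (hypothetical). */
struct fake_mmu {
	void *pae_root;
	void *pml4_root;
	void *pml5_root;	/* left NULL when the host uses 4-level paging */
};

/* Mirrors the current checks: pml5_root is demanded even on 4-level hosts. */
static int alloc_special_roots_current(struct fake_mmu *mmu)
{
	/* Fast path: only taken if *all three* roots are already populated. */
	if (mmu->pae_root && mmu->pml4_root && mmu->pml5_root)
		return 0;

	/* Sanity check: partially populated roots are treated as a bug. */
	if (mmu->pae_root || mmu->pml4_root || mmu->pml5_root)
		return -5;	/* stand-in for -EIO */

	/* ...the actual allocations would happen here... */
	return 0;
}

int main(void)
{
	/* State after the first successful call on a 4-level host. */
	struct fake_mmu mmu = {
		.pae_root  = (void *)1,
		.pml4_root = (void *)1,
		.pml5_root = NULL,
	};

	/* Second call: fast path misses, sanity check fires. */
	printf("second call returns %d\n", alloc_special_roots_current(&mmu));
	return 0;
}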

I think this is the least awful fix; I'll test and send a proper patch later today.

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4853c033e6ce..93b2ed422b48 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3548,6 +3548,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
+	bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
 	u64 *pml5_root = NULL;
 	u64 *pml4_root = NULL;
 	u64 *pae_root;
@@ -3562,7 +3563,14 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
 		return 0;
 
-	if (mmu->pae_root && mmu->pml4_root && mmu->pml5_root)
+	/*
+	 * NPT, the only paging mode that uses this horror, uses a fixed number
+	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
+	 * all MMUs are 5-level.  Thus, this can safely require that pml5_root
+	 * is allocated if the other roots are valid and pml5 is needed, as any
+	 * prior MMU would also have required pml5.
+	 */
+	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
 		return 0;
 
 	/*
@@ -3570,7 +3578,7 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	 * bail if KVM ends up in a state where only one of the roots is valid.
 	 */
 	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
-			 mmu->pml5_root))
+			 (need_pml5 && mmu->pml5_root)))
 		return -EIO;
 
 	/*
> > +	if (mmu->pae_root && mmu->pml4_root)
> >  		return 0;
> > 
> >  	/*
> >
> >
> >
> > Best regards,
> > Maxim Levitsky
> >
