Subject: Re: [PATCH] KVM: x86: fix bogus warning about reserved bits
From: Paolo Bonzini
Date: 2015-09-23


On 23/09/2015 09:56, Borislav Petkov wrote:
> On Tue, Sep 22, 2015 at 11:04:38PM +0200, Paolo Bonzini wrote:
>> Let's add more debugging output:
>
> Here you go:
>
> [ 50.474002] walk_shadow_page_get_mmio_spte: detect reserved bits on spte, addr 0xb8000 (level 4, 0xf0000000000f8)
> [ 50.484249] walk_shadow_page_get_mmio_spte: detect reserved bits on spte, addr 0xb8000 (level 3, 0xf000000000078)
> [ 50.494492] walk_shadow_page_get_mmio_spte: detect reserved bits on spte, addr 0xb8000 (level 2, 0xf000000000078)

And another patch, which both cranks up the debugging a bit and
tries another fix:

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index dd05b9cef6ae..b2f49bb15ba1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -105,8 +105,17 @@ static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
+	/* Debug: print the first successful CPUID(0) lookup just once. */
+	static bool first = true;
 
 	best = kvm_find_cpuid_entry(vcpu, 0, 0);
+	if (first && best) {
+		printk("cpuid(0).ebx = %x\n", best->ebx);
+		first = false;
+	} else if (first) {
+		printk_ratelimited("cpuid(0) not initialized yet\n");
+	}
+
 	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
 }
 

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bf1122e9c7bf..f50b280ffee1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3625,7 +3625,7 @@ static void
 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			struct rsvd_bits_validate *rsvd_check,
 			int maxphyaddr, int level, bool nx, bool gbpages,
-			bool pse)
+			bool pse, bool amd)
 {
 	u64 exb_bit_rsvd = 0;
 	u64 gbpages_bit_rsvd = 0;
@@ -3642,7 +3642,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
 	 * leaf entries) on AMD CPUs only.
 	 */
-	if (guest_cpuid_is_amd(vcpu))
+	if (amd)
 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
 
 	switch (level) {
@@ -3710,7 +3710,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
 				cpuid_maxphyaddr(vcpu), context->root_level,
 				context->nx, guest_cpuid_has_gbpages(vcpu),
-				is_pse(vcpu));
+				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
 static void
@@ -3760,13 +3760,26 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	/*
+	 * Passing "true" to the last argument is okay; it adds a check
+	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
+	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, context->nx,
-				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+				true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
+static inline bool
+boot_cpu_is_amd(void)
+{
+	/* NPT has no X permission bit, so shadow_x_mask is 0 on AMD hosts. */
+	WARN_ON_ONCE(!tdp_enabled);
+	return shadow_x_mask == 0;
+}
+
 /*
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
@@ -3775,11 +3788,11 @@ static void
 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context)
 {
-	if (guest_cpuid_is_amd(vcpu))
+	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true);
+					cpu_has_gbpages, true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,

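To see concretely what the new "amd" argument changes, here is a
minimal stand-alone sketch (hypothetical user-space code, not the
kernel implementation; rsvd_bits() is reimplemented locally, the
NX-dependent exb_bit_rsvd part is omitted, and maxphyaddr = 40 is
just an assumed value for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the kernel's rsvd_bits(): build a mask of bits lo..hi. */
static uint64_t rsvd_bits(int lo, int hi)
{
	return ((1ULL << (hi - lo + 1)) - 1) << lo;
}

/* Rough non-leaf PML4E reserved-bit mask, as in __reset_rsvds_bits_mask. */
static uint64_t pml4e_rsvd_mask(int maxphyaddr, bool amd)
{
	uint64_t mask = rsvd_bits(maxphyaddr, 51) |	/* above MAXPHYADDR */
			rsvd_bits(7, 7);		/* PS must be zero */

	if (amd)
		mask |= rsvd_bits(8, 8);	/* bit 8 (G) reserved on AMD */
	return mask;
}

int main(void)
{
	printf("intel pml4e mask: %#llx\n",
	       (unsigned long long)pml4e_rsvd_mask(40, false));
	printf("amd   pml4e mask: %#llx\n",
	       (unsigned long long)pml4e_rsvd_mask(40, true));
	return 0;
}

The point of boot_cpu_is_amd() is that the TDP page tables are built
in the host's format, so the bit 8 check has to follow the host CPU
rather than whatever the guest's CPUID happens to (not yet) say.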

Applies on top of everything else you've got already.

Paolo

