From: Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH v2 12/13] KVM: x86: add SMM to the MMU role, support SMRAM address space
Date: 27 May 2015
This is now very simple to do.  The only interesting part is a small
trick to find the right memslot in gfn_to_rmap: retrieve the address
space from the role word of the shadow page that the SPTE belongs to.
The same trick is used in the auditing code.
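
As an illustration (not part of the patch): since the new smm field
occupies the top byte of the 32-bit role word, the address space index
can be recovered from a raw role word with a single shift instead of a
bitfield extraction, e.g. with a hypothetical helper like

	/* sketch only; equivalent to reading role.smm, which is 0 or 1 here */
	static inline int role_word_to_as_id(unsigned word)
	{
		return word >> 24;
	}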

The comment on top of union kvm_mmu_page_role has been stale forever,
so remove it.  Speaking of stale code, remove pad_for_nice_hex_output
too: it was splitting the "access" bitfield across two bytes and thus
had effectively turned into pad_for_ugly_hex_output.
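
To make the breakage concrete, here is the layout the pad produced,
assuming LSB-first bitfield packing as GCC uses on x86 (illustration
only, not from the patch):

	level:4      bits  0-3
	cr4_pae:1    bit   4
	quadrant:2   bits  5-6
	pad:6        bits  7-12
	direct:1     bit  13
	access:3     bits 14-16   <- straddles bytes 1 and 2

Dropping the pad moves access down to bits 8-10, back inside a single
byte.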

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
v1->v2: new
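
For reviewers, a minimal sketch (not part of the patch) of how the new
pieces fit together; both helpers below are hypothetical:

	/* A vCPU in SMM resolves gfns against address space 1 (SMRAM). */
	static struct kvm_memslots *slots_for_vcpu(struct kvm_vcpu *vcpu)
	{
		return __kvm_memslots(vcpu->kvm,
				      kvm_arch_vcpu_memslots_id(vcpu));
	}

	/*
	 * Code that only has a shadow page at hand, such as the rmap
	 * and accounting functions, derives the same index from the
	 * role word stored in the page.
	 */
	static struct kvm_memslots *slots_for_sp(struct kvm *kvm,
						 struct kvm_mmu_page *sp)
	{
		return kvm_memslots_for_spte_role(kvm, sp->role);
	}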

 arch/x86/include/asm/kvm_host.h | 26 +++++++++++++++-----------
 arch/x86/kvm/mmu.c              | 15 ++++++++++++---
 arch/x86/kvm/mmu_audit.c        | 10 +++++++---
 arch/x86/kvm/x86.c              |  2 ++
 4 files changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5a5e13af6e03..47006683f2fe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -184,23 +184,12 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };

-/*
- * kvm_mmu_page_role, below, is defined as:
- *
- * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
- * bits 4:7 - page table level for this shadow (1-4)
- * bits 8:9 - page table quadrant for 2-level guests
- * bit   16 - direct mapping of virtual to physical mapping at gfn
- *            used for real mode and two-dimensional paging
- * bits 17:19 - common access permissions for all ptes in this shadow page
- */
 union kvm_mmu_page_role {
 	unsigned word;
 	struct {
 		unsigned level:4;
 		unsigned cr4_pae:1;
 		unsigned quadrant:2;
-		unsigned pad_for_nice_hex_output:6;
 		unsigned direct:1;
 		unsigned access:3;
 		unsigned invalid:1;
@@ -208,6 +197,15 @@ union kvm_mmu_page_role {
 		unsigned cr0_wp:1;
 		unsigned smep_andnot_wp:1;
 		unsigned smap_andnot_wp:1;
+		unsigned :8;
+
+		/*
+		 * This is left at the top of the word so that
+		 * kvm_memslots_for_spte_role can extract it with a
+		 * simple shift.  While there is room, give it a whole
+		 * byte so it is also faster to load it from memory.
+		 */
+		unsigned smm:8;
 	};
 };

@@ -1120,6 +1118,12 @@ enum {
 #define HF_SMM_MASK		(1 << 6)
 #define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

+#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#define KVM_ADDRESS_SPACE_NUM 2
+
+#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
+#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a749490bc1db..8e9b1758b7a7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -806,13 +806,15 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,

 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
 	int i;

 	gfn = sp->gfn;
-	slot = gfn_to_memslot(kvm, gfn);
+	slots = kvm_memslots_for_spte_role(kvm, sp->role);
+	slot = __gfn_to_memslot(slots, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->write_count += 1;
@@ -822,13 +824,15 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)

 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
 	gfn_t gfn;
 	int i;

 	gfn = sp->gfn;
-	slot = gfn_to_memslot(kvm, gfn);
+	slots = kvm_memslots_for_spte_role(kvm, sp->role);
+	slot = __gfn_to_memslot(slots, gfn);
 	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->write_count -= 1;
@@ -1045,9 +1049,11 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
  */
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *slot;

-	slot = gfn_to_memslot(kvm, gfn);
+	slots = kvm_memslots_for_spte_role(kvm, sp->role);
+	slot = __gfn_to_memslot(slots, gfn);
 	return __gfn_to_rmap(gfn, sp->role.level, slot);
 }

@@ -3924,6 +3930,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *context = &vcpu->arch.mmu;

 	context->base_role.word = 0;
+	context->base_role.smm = is_smm(vcpu);
 	context->page_fault = tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
@@ -3985,6 +3992,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 		= smep && !is_write_protection(vcpu);
 	context->base_role.smap_andnot_wp
 		= smap && !is_write_protection(vcpu);
+	context->base_role.smm = is_smm(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

@@ -4267,6 +4275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		.nxe = 1,
 		.smep_andnot_wp = 1,
 		.smap_andnot_wp = 1,
+		.smm = 1,
 	};

 	/*
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 78288c15400c..a4f62e6f2db2 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -131,12 +131,16 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 	unsigned long *rmapp;
 	struct kvm_mmu_page *rev_sp;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
 	gfn_t gfn;

 	rev_sp = page_header(__pa(sptep));
 	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

-	if (!gfn_to_memslot(kvm, gfn)) {
+	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
+	slot = __gfn_to_memslot(slots, gfn);
+	if (!slot) {
 		if (!__ratelimit(&ratelimit_state))
 			return;
 		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
@@ -146,7 +150,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 		return;
 	}

-	rmapp = gfn_to_rmap(kvm, gfn, rev_sp);
+	rmapp = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
 	if (!*rmapp) {
 		if (!__ratelimit(&ratelimit_state))
 			return;
@@ -197,7 +201,7 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (sp->role.direct || sp->unsync || sp->role.invalid)
 		return;

-	slots = kvm_memslots(kvm);
+	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, sp->gfn);
 	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7dd6c61a71ae..2fa345d9bd6c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5477,6 +5477,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 			vcpu->arch.smi_pending = 0;
 		}
 	}
+
+	kvm_mmu_reset_context(vcpu);
 }

 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-- 
1.8.3.1


