Subject: [PATCH 3.12 34/63] arm/arm64: KVM: Fix and refactor unmap_range

From: Christoffer Dall <christoffer.dall@linaro.org>

3.12-stable review patch.  If anyone has any objections, please let me know.

===============

commit 4f853a714bf16338ff5261128e6c7ae2569e9505 upstream.

unmap_range() was utterly broken, to quote Marc, and broke in all sorts
of situations.  It was also quite complicated to follow and didn't
follow the usual scheme of having a separate iterating function for
each level of page tables.

Address this by refactoring the code and introducing a clear_pgd_entry()
function.
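
For readers following along, the "usual scheme" referred to above is one
iterating function per page table level: each walker clamps its sub-range
with the matching addr_end() helper, visits the entries it owns, and calls
down one level. A minimal sketch of the shape (the walk_* names are
illustrative, not the functions this patch adds):

	static void walk_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
	{
		pte_t *pte = pte_offset_kernel(pmd, addr);

		do {
			/* each iteration covers exactly one PTE / PAGE_SIZE */
		} while (pte++, addr += PAGE_SIZE, addr != end);
	}

	static void walk_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
	{
		phys_addr_t next;
		pmd_t *pmd = pmd_offset(pud, addr);

		do {
			/* clamp to this PMD entry's span, or to 'end' */
			next = kvm_pmd_addr_end(addr, end);
			if (!pmd_none(*pmd))
				walk_ptes(pmd, addr, next);
		} while (pmd++, addr = next, addr != end);
	}

The patch below applies this shape at every level (unmap_ptes(),
unmap_pmds(), unmap_puds(), unmap_range()); on the way back up, the
kvm_*_table_empty() helpers detect table pages whose page_count() has
dropped back to the bare allocation reference, i.e. pages holding no
entries, so the walkers can free them.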

Reviewed-by: Jungseok Lee <jays.lee@samsung.com>
Reviewed-by: Mario Smarduch <m.smarduch@samsung.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
---
 arch/arm/include/asm/kvm_mmu.h   |  11 ++++
 arch/arm/kvm/mmu.c               | 106 +++++++++++++++++++++++----------------
 arch/arm64/include/asm/kvm_mmu.h |  14 ++++++
 3 files changed, 89 insertions(+), 42 deletions(-)
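
One note on the *_addr_end() helpers the new walkers lean on: each returns
whichever comes first, the next boundary of the current table entry or the
caller's end, so no loop iteration crosses an entry and the "addr != end"
test in each loop tail is hit exactly. A stand-alone sketch of the clamping
arithmetic (SECTION_SIZE and addr_end() are hypothetical stand-ins; the
kvm_p*d_addr_end() macros in the hunks below do the same with the real
spans):

	#include <stdio.h>

	#define SECTION_SIZE	(1ULL << 21)	/* e.g. a 2 MiB PMD span */
	#define SECTION_MASK	(~(SECTION_SIZE - 1))

	static unsigned long long addr_end(unsigned long long addr,
					   unsigned long long end)
	{
		unsigned long long boundary =
			(addr + SECTION_SIZE) & SECTION_MASK;

		/* "- 1" on both sides so a range whose end wraps to 0 at
		 * the very top of the address space still clamps to the
		 * boundary rather than to end */
		return (boundary - 1 < end - 1) ? boundary : end;
	}

	int main(void)
	{
		/* an unaligned 5 MiB range is visited as 1M+2M+2M chunks */
		unsigned long long addr = 0x100000, end = 0x600000, next;

		do {
			next = addr_end(addr, end);
			printf("chunk: %#llx-%#llx\n", addr, next);
		} while (addr = next, addr != end);
		return 0;
	}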

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 1de3a9b7bab6..a2c3c313ea77 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -116,6 +116,17 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
 
+static inline bool kvm_page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#define kvm_pud_table_empty(pudp) (0)
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 61c5a92f6d9d..8e364f793c2c 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -87,10 +87,13 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
-static bool page_empty(void *ptr)
+static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-	struct page *ptr_page = virt_to_page(ptr);
-	return page_count(ptr_page) == 1;
+	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
+	pgd_clear(pgd);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	pud_free(NULL, pud_table);
+	put_page(virt_to_page(pgd));
 }
 
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
@@ -111,55 +114,74 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
+static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+		       phys_addr_t addr, phys_addr_t end)
 {
-	if (pte_present(*pte)) {
-		kvm_set_pte(pte, __pte(0));
-		put_page(virt_to_page(pte));
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	}
+	phys_addr_t start_addr = addr;
+	pte_t *pte, *start_pte;
+
+	start_pte = pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte)) {
+			kvm_set_pte(pte, __pte(0));
+			put_page(virt_to_page(pte));
+			kvm_tlb_flush_vmid_ipa(kvm, addr);
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	if (kvm_pte_table_empty(start_pte))
+		clear_pmd_entry(kvm, pmd, start_addr);
 }
 
-static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
-			unsigned long long start, u64 size)
+static void unmap_pmds(struct kvm *kvm, pud_t *pud,
+		       phys_addr_t addr, phys_addr_t end)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	unsigned long long addr = start, end = start + size;
-	u64 next;
+	phys_addr_t next, start_addr = addr;
+	pmd_t *pmd, *start_pmd;
 
-	while (addr < end) {
-		pgd = pgdp + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
-		if (pud_none(*pud)) {
-			addr = kvm_pud_addr_end(addr, end);
-			continue;
+	start_pmd = pmd = pmd_offset(pud, addr);
+	do {
+		next = kvm_pmd_addr_end(addr, end);
+		if (!pmd_none(*pmd)) {
+			unmap_ptes(kvm, pmd, addr, next);
 		}
+	} while (pmd++, addr = next, addr != end);
 
-		pmd = pmd_offset(pud, addr);
-		if (pmd_none(*pmd)) {
-			addr = kvm_pmd_addr_end(addr, end);
-			continue;
-		}
+	if (kvm_pmd_table_empty(start_pmd))
+		clear_pud_entry(kvm, pud, start_addr);
+}
 
-		pte = pte_offset_kernel(pmd, addr);
-		clear_pte_entry(kvm, pte, addr);
-		next = addr + PAGE_SIZE;
-
-		/* If we emptied the pte, walk back up the ladder */
-		if (page_empty(pte)) {
-			clear_pmd_entry(kvm, pmd, addr);
-			next = kvm_pmd_addr_end(addr, end);
-			if (page_empty(pmd) && !page_empty(pud)) {
-				clear_pud_entry(kvm, pud, addr);
-				next = kvm_pud_addr_end(addr, end);
-			}
+static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
+		       phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t next, start_addr = addr;
+	pud_t *pud, *start_pud;
+
+	start_pud = pud = pud_offset(pgd, addr);
+	do {
+		next = kvm_pud_addr_end(addr, end);
+		if (!pud_none(*pud)) {
+			unmap_pmds(kvm, pud, addr, next);
 		}
+	} while (pud++, addr = next, addr != end);
 
-		addr = next;
-	}
+	if (kvm_pud_table_empty(start_pud))
+		clear_pgd_entry(kvm, pgd, start_addr);
+}
+
+
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+			phys_addr_t start, u64 size)
+{
+	pgd_t *pgd;
+	phys_addr_t addr = start, end = start + size;
+	phys_addr_t next;
+
+	pgd = pgdp + pgd_index(addr);
+	do {
+		next = kvm_pgd_addr_end(addr, end);
+		unmap_puds(kvm, pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
 }
 
 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 3b038b39ba9b..db6b8f69e3f5 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -119,6 +119,20 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
 #define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
 #define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
 
+static inline bool kvm_page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#ifndef CONFIG_ARM64_64K_PAGES
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#else
+#define kvm_pmd_table_empty(pmdp) (0)
+#endif
+#define kvm_pud_table_empty(pudp) (0)
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
--
2.3.5

