From: Christophe Leroy <christophe.leroy@c-s.fr>
Subject: [PATCH 1/5] powerpc/mm: define empty update_mmu_cache() as static inline
Date: 16 Aug 2019
Only BOOK3S and FSL_BOOK3E have a useful update_mmu_cache().

For the others, just define it static inline.
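
A minimal sketch of the pattern, for illustration only (the two
declarations below are alternatives shown side by side; they would
never appear together in one header):

    /* Out-of-line declaration: every caller emits a real call and
     * return, even when the function body turns out to be empty. */
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

    /* Empty static inline: the compiler sees the empty body and
     * generates no code at the call site. */
    static inline
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}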

While at it, simplify the FSL_BOOK3E-related ifdef, as
book3e_hugetlb_preload() only exists when CONFIG_PPC_FSL_BOOK3E
is selected.
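
For context, a hypothetical caller (illustrative only, not part of
the patch; generic mm code calls this hook after installing a PTE):
no ifdef is needed at the call site, since the no-op variant compiles
away entirely:

    /* Hypothetical example: callers invoke the hook unconditionally.
     * Where update_mmu_cache() is the empty static inline, this call
     * generates no code at all. */
    static void example_finish_fault(struct vm_area_struct *vma,
                                     unsigned long address, pte_t *ptep)
    {
            update_mmu_cache(vma, address, ptep);
    }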

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/book3s/pgtable.h | 11 +++++++++++
arch/powerpc/include/asm/nohash/pgtable.h | 13 +++++++++++++
arch/powerpc/include/asm/pgtable.h        | 12 ------------
arch/powerpc/mm/mem.c                     | 11 +++++++----
4 files changed, 31 insertions(+), 16 deletions(-)
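
Condensed view of the resulting declarations (a reading aid assembled
from the hunks below, not an additional change):

    /* book3s/pgtable.h: always a real function, which preloads a
     * HPTE into the hash table on hash-MMU machines. */
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

    /* nohash/pgtable.h: a real function only for the FSL_BOOK3E
     * hugetlb preload case, an empty static inline otherwise. */
    #if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
    #else
    static inline
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
    #endif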

diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 6436b65ac7bc..0e1263455d73 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -26,5 +26,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1ca1c1864b32..7fed9dc0f147 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -293,5 +293,18 @@ static inline int pgd_huge(pgd_t pgd)
 #define is_hugepd(hpd) (hugepd_ok(hpd))
 #endif
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ */
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index c58ba7963688..c70916a7865a 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -77,18 +77,6 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
 
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
-
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd) 0
 #endif
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 9191a66b3bc5..582ad728ac9d 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -414,10 +414,10 @@ EXPORT_SYMBOL(flush_icache_user_range);
  *
  * This must always be called with the pte lock held.
  */
+#ifdef CONFIG_PPC_BOOK3S
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
 {
-#ifdef CONFIG_PPC_BOOK3S
         /*
          * We don't need to worry about _PAGE_PRESENT here because we are
          * called with either mm->page_table_lock held or ptl lock held
@@ -455,13 +455,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
         }
 
         hash_preload(vma->vm_mm, address, is_exec, trap);
+}
 #endif /* CONFIG_PPC_BOOK3S */
-#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
-        && defined(CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+                      pte_t *ptep)
+{
         if (is_vm_hugetlb_page(vma))
                 book3e_hugetlb_preload(vma, address, *ptep);
-#endif
 }
+#endif
 
 /*
  * System memory should not be in /proc/iomem but various tools expect it
--
2.13.3