Subject: Re: [PATCH v2 02/10] mm/thp: make is_huge_zero_pmd() safe and quicker
On Tue, Jun 8, 2021 at 9:08 PM Hugh Dickins <hughd@google.com> wrote:
>
> Most callers of is_huge_zero_pmd() supply a pmd already verified present;
> but a few (notably zap_huge_pmd()) do not - it might be a pmd migration
> entry, in which case the pfn is encoded differently from that of a present
> pmd. Such an entry might pass the is_huge_zero_pmd() test (though not on
> x86, since L1TF forced us to protect against that), or perhaps even crash
> in pmd_page() when applied to a swap-like entry.
>
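For anyone reading along, here is a compilable userspace toy model of that
failure mode. The encoding and helper names below are invented for
illustration, not the kernel's real ones (the pre-patch kernel went through
pmd_page(), which is where the crash risk comes from; the toy just compares
pfns to keep it self-contained):

#include <stdbool.h>
#include <stdio.h>

/* Toy pmd: low bit = present; when clear, the remaining bits encode
 * a swap/migration entry whose value can alias any pfn. */
typedef unsigned long pmd_t;
#define PMD_PRESENT 0x1UL

static unsigned long huge_zero_pfn = ~0UL;

static bool pmd_present(pmd_t pmd) { return pmd & PMD_PRESENT; }
static unsigned long pmd_pfn(pmd_t pmd) { return pmd >> 1; }

/* Old-style check: trusts the pfn bits even when !pmd_present(). */
static bool zero_check_old(pmd_t pmd)
{
	return pmd_pfn(pmd) == huge_zero_pfn;
}

/* New-style check: the pfn must match AND the pmd must be present. */
static bool zero_check_new(pmd_t pmd)
{
	return pmd_pfn(pmd) == huge_zero_pfn && pmd_present(pmd);
}

int main(void)
{
	huge_zero_pfn = 42;
	pmd_t migration_entry = 42UL << 1;	/* present bit clear */

	/* old: 1 (false positive), new: 0 (rejected) */
	printf("old=%d new=%d\n",
	       zero_check_old(migration_entry),
	       zero_check_new(migration_entry));
	return 0;
}
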
> Make it safe by adding a pmd_present() check into is_huge_zero_pmd()
> itself; and make it quicker by caching huge_zero_pfn, so that
> is_huge_zero_pmd() will not need to do that pmd_page() lookup each time.
>
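The lifecycle discipline for the cached pfn, pulled out of the diff below
into one userspace-compilable sketch (READ_ONCE/WRITE_ONCE approximated
with volatile accesses; refcounting and the actual page allocation elided):

#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

/* ~0UL is a sentinel no valid pfn can equal, so a reader racing the
 * shrinker compares against a value that matches nothing. */
static unsigned long huge_zero_pfn = ~0UL;

static void publish_zero_pfn(unsigned long pfn)
{
	WRITE_ONCE(huge_zero_pfn, pfn);		/* after the page exists */
}

static void retire_zero_pfn(void)
{
	WRITE_ONCE(huge_zero_pfn, ~0UL);	/* before the page is freed */
}

static int pfn_is_huge_zero(unsigned long pfn)
{
	/* Single marked load: the value is read once and compared,
	 * whatever the shrinker does concurrently. */
	return READ_ONCE(huge_zero_pfn) == pfn;
}
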
> __split_huge_pmd_locked() checked pmd_trans_huge() before: that worked,
> but is unnecessary now that is_huge_zero_pmd() checks pmd_present() itself.
>
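Spelled out, the simplification in __split_huge_pmd_locked() relies on the
huge zero page only ever being mapped by a present trans-huge pmd:

/* before: the guard kept non-present pmds away from pmd_page()
 * inside the old is_huge_zero_pmd() */
if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd))
	...

/* after: the new is_huge_zero_pmd() checks pmd_present() itself,
 * and a present pmd mapping the huge zero page is necessarily
 * trans-huge, so the guard adds nothing */
if (is_huge_zero_pmd(*pmd))
	...
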
> Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Hugh Dickins <hughd@google.com>
> Cc: <stable@vger.kernel.org>

Reviewed-by: Yang Shi <shy828301@gmail.com>

> ---
> Patch added (replacing part of the first) since the v1 series was posted.
>
> include/linux/huge_mm.h | 8 +++++++-
> mm/huge_memory.c | 5 ++++-
> 2 files changed, 11 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 9626fda5efce..2a8ebe6c222e 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
> vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
>
> extern struct page *huge_zero_page;
> +extern unsigned long huge_zero_pfn;
>
> static inline bool is_huge_zero_page(struct page *page)
> {
> @@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
>
> static inline bool is_huge_zero_pmd(pmd_t pmd)
> {
> - return is_huge_zero_page(pmd_page(pmd));
> + return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
> }
>
> static inline bool is_huge_zero_pud(pud_t pud)
> @@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
> return false;
> }
>
> +static inline bool is_huge_zero_pmd(pmd_t pmd)
> +{
> + return false;
> +}
> +
> static inline bool is_huge_zero_pud(pud_t pud)
> {
> return false;
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 42cfefc6e66e..5885c5f5836f 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
>
> static atomic_t huge_zero_refcount;
> struct page *huge_zero_page __read_mostly;
> +unsigned long huge_zero_pfn __read_mostly = ~0UL;
>
> bool transparent_hugepage_enabled(struct vm_area_struct *vma)
> {
> @@ -98,6 +99,7 @@ static bool get_huge_zero_page(void)
> __free_pages(zero_page, compound_order(zero_page));
> goto retry;
> }
> + WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
>
> /* We take additional reference here. It will be put back by shrinker */
> atomic_set(&huge_zero_refcount, 2);
> @@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
> if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
> struct page *zero_page = xchg(&huge_zero_page, NULL);
> BUG_ON(zero_page == NULL);
> + WRITE_ONCE(huge_zero_pfn, ~0UL);
> __free_pages(zero_page, compound_order(zero_page));
> return HPAGE_PMD_NR;
> }
> @@ -2071,7 +2074,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
> return;
> }
>
> - if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
> + if (is_huge_zero_pmd(*pmd)) {
> /*
> * FIXME: Do we want to invalidate secondary mmu by calling
> * mmu_notifier_invalidate_range() see comments below inside
> --
> 2.26.2
>
