Subject: [PATCH 3.16 104/306] mm/hugetlb: check for reserved hugepages during memory offline
    3.16.40-rc1 review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Gerald Schaefer <gerald.schaefer@de.ibm.com>

    commit 082d5b6b60e9f25e1511557fcfcb21eedd267446 upstream.

    In dissolve_free_huge_pages(), free hugepages will be dissolved without
    making sure that there are enough of them left to satisfy hugepage
    reservations.

    Fix this by adding a return value to dissolve_free_huge_pages() and
    checking h->free_huge_pages vs. h->resv_huge_pages. Note that this may
    lead to the situation where dissolve_free_huge_page() returns an error
    and all free hugepages that were dissolved before that error are lost,
    while the memory block still cannot be set offline.
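
As an aside, the invariant the patch enforces can be sketched in a few
lines of plain userspace C. Illustration only: hstate_model and
dissolve_one() below are made-up stand-ins for the kernel's struct hstate
and dissolve_free_huge_page(), reduced to just the counter check being
added here.

#include <errno.h>
#include <stdio.h>

/* Made-up model of the two struct hstate counters involved. */
struct hstate_model {
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
};

/* Refuse to dissolve once the free count would drop below the
 * reserved count, mirroring the new check in dissolve_free_huge_page().
 */
static int dissolve_one(struct hstate_model *h)
{
	if (h->free_huge_pages - h->resv_huge_pages == 0)
		return -EBUSY;
	h->free_huge_pages--;
	return 0;
}

int main(void)
{
	struct hstate_model h = { .free_huge_pages = 4, .resv_huge_pages = 2 };
	int i, rc;

	/* Only two pages are unreserved; the third attempt fails. */
	for (i = 0; i < 3; i++) {
		rc = dissolve_one(&h);
		printf("dissolve %d: rc=%d free=%lu resv=%lu\n",
		       i, rc, h.free_huge_pages, h.resv_huge_pages);
	}
	return 0;
}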

    Fixes: c8721bbb ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
    Link: http://lkml.kernel.org/r/20160926172811.94033-3-gerald.schaefer@de.ibm.com
    Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
    Acked-by: Michal Hocko <mhocko@suse.com>
    Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
    Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
    Cc: Vlastimil Babka <vbabka@suse.cz>
    Cc: Mike Kravetz <mike.kravetz@oracle.com>
    Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
    Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
    Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
    Cc: Rui Teng <rui.teng@linux.vnet.ibm.com>
    Cc: Dave Hansen <dave.hansen@linux.intel.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    [bwh: Backported to 3.16: adjust context]
    Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
    ---
 include/linux/hugetlb.h |  6 +++---
 mm/hugetlb.c            | 26 +++++++++++++++++++++-----
 mm/memory_hotplug.c     |  4 +++-
 3 files changed, 27 insertions(+), 9 deletions(-)

--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -396,8 +396,8 @@ static inline pgoff_t basepage_index(str
 	return __basepage_index(page);
 }
 
-extern void dissolve_free_huge_pages(unsigned long start_pfn,
-				     unsigned long end_pfn);
+extern int dissolve_free_huge_pages(unsigned long start_pfn,
+				    unsigned long end_pfn);
 static inline int hugepage_migration_supported(struct hstate *h)
 {
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -452,7 +452,7 @@ static inline pgoff_t basepage_index(str
 {
 	return page->index;
 }
-#define dissolve_free_huge_pages(s, e)	do {} while (0)
+#define dissolve_free_huge_pages(s, e)	0
 #define hugepage_migration_supported(h)	0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1067,21 +1067,31 @@ static int free_pool_huge_page(struct hs
 
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages.
+ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
+ * number of free hugepages would be reduced below the number of reserved
+ * hugepages.
  */
-static void dissolve_free_huge_page(struct page *page)
+static int dissolve_free_huge_page(struct page *page)
 {
+	int rc = 0;
+
 	spin_lock(&hugetlb_lock);
 	if (PageHuge(page) && !page_count(page)) {
 		struct page *head = compound_head(page);
 		struct hstate *h = page_hstate(head);
 		int nid = page_to_nid(head);
+		if (h->free_huge_pages - h->resv_huge_pages == 0) {
+			rc = -EBUSY;
+			goto out;
+		}
 		list_del(&head->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
 		update_and_free_page(h, head);
 	}
+out:
 	spin_unlock(&hugetlb_lock);
+	return rc;
 }
 
 /*
@@ -1089,16 +1099,22 @@ static void dissolve_free_huge_page(stru
  * make specified memory blocks removable from the system.
  * Note that this will dissolve a free gigantic hugepage completely, if any
  * part of it lies within the given range.
+ * Also note that if dissolve_free_huge_page() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
  */
-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
+	int rc = 0;
 
 	if (!hugepages_supported())
-		return;
+		return rc;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
-		dissolve_free_huge_page(pfn_to_page(pfn));
+		if (rc = dissolve_free_huge_page(pfn_to_page(pfn)))
+			break;
+
+	return rc;
 }
 
 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1732,7 +1732,9 @@ repeat:
 	 * dissolve free hugepages in the memory block before doing offlining
 	 * actually in order to make hugetlbfs's object counting consistent.
 	 */
-	dissolve_free_huge_pages(start_pfn, end_pfn);
+	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
+	if (ret)
+		goto failed_removal;
 	/* check again */
 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 	if (offlined_pages < 0) {
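
A hedged usage sketch of how the new failure mode becomes visible from
userspace: offline_pages() now fails with the error returned by
dissolve_free_huge_pages(), and that error propagates to the sysfs write
requesting the offline. The memory block number below, and the assumption
that its range holds free but reserved hugepages, are illustrative only.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* memory32 is a placeholder; pick a block backed by free but
	 * reserved hugepages. Requires root. */
	const char *path = "/sys/devices/system/memory/memory32/state";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* With this patch applied, expect EBUSY when dissolving free
	 * hugepages would dip into the reserved pool. */
	if (write(fd, "offline", 7) < 0)
		fprintf(stderr, "offline failed: %s\n", strerror(errno));
	else
		puts("memory block offlined");
	close(fd);
	return 0;
}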