Subject: [PATCH v5 4/5] mm: Move buddy list manipulations into helpers
From: Dan Williams <dan.j.williams@intel.com>
Date: Fri, 14 Dec 2018
    In preparation for runtime randomization of the zone lists, take all
    (well, most of) the list_*() functions in the buddy allocator and put
    them in helper functions. Provide a common control point for injecting
    additional behavior when freeing pages.

    Cc: Michal Hocko <mhocko@suse.com>
    Cc: Dave Hansen <dave.hansen@linux.intel.com>
    Signed-off-by: Dan Williams <dan.j.williams@intel.com>
    ---
    include/linux/mm.h       |  3 --
    include/linux/mm_types.h |  3 ++
    include/linux/mmzone.h   | 51 ++++++++++++++++++++++++++++++++++
    mm/compaction.c          |  4 +--
    mm/page_alloc.c          | 70 ++++++++++++++++++----------------------------
    5 files changed, 84 insertions(+), 47 deletions(-)
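    [ Reviewer note, not part of the patch: a minimal sketch of the control
    point this refactor creates. With every free-list insertion funneled
    through add_to_free_area() / add_to_free_area_tail(), a follow-on patch
    could inject placement policy in one spot. The helper name
    add_to_free_area_random() and the shuffle_pick_tail() entropy hook
    below are hypothetical stand-ins; neither is introduced here. ]

    static inline void add_to_free_area_random(struct page *page,
    		struct free_area *area, int migratetype)
    {
    	/*
    	 * shuffle_pick_tail() is a hypothetical hook that would return
    	 * true pseudo-randomly; it is not defined by this patch.
    	 */
    	if (shuffle_pick_tail())
    		add_to_free_area_tail(page, area, migratetype);
    	else
    		add_to_free_area(page, area, migratetype);
    }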

    diff --git a/include/linux/mm.h b/include/linux/mm.h
    index f9647779e82b..43e5a449caaf 100644
    --- a/include/linux/mm.h
    +++ b/include/linux/mm.h
    @@ -473,9 +473,6 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
    struct mmu_gather;
    struct inode;

    -#define page_private(page) ((page)->private)
    -#define set_page_private(page, v) ((page)->private = (v))
    -
    #if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
    static inline int pmd_devmap(pmd_t pmd)
    {
    diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
    index 5ed8f6292a53..72f37ea6dedb 100644
    --- a/include/linux/mm_types.h
    +++ b/include/linux/mm_types.h
    @@ -209,6 +209,9 @@ struct page {
    #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
    #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)

    +#define page_private(page) ((page)->private)
    +#define set_page_private(page, v) ((page)->private = (v))
    +
    struct page_frag_cache {
    void * va;
    #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
    diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
    index eafa66d66232..35cc33af87f2 100644
    --- a/include/linux/mmzone.h
    +++ b/include/linux/mmzone.h
    @@ -18,6 +18,8 @@
    #include <linux/pageblock-flags.h>
    #include <linux/page-flags-layout.h>
    #include <linux/atomic.h>
    +#include <linux/mm_types.h>
    +#include <linux/page-flags.h>
    #include <asm/page.h>

    /* Free memory management - zoned buddy allocator. */
    @@ -98,6 +100,55 @@ struct free_area {
    unsigned long nr_free;
    };

    +/* Used for pages not on another list */
    +static inline void add_to_free_area(struct page *page, struct free_area *area,
    + int migratetype)
    +{
    + list_add(&page->lru, &area->free_list[migratetype]);
    + area->nr_free++;
    +}
    +
    +/* Used for pages not on another list */
    +static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
    + int migratetype)
    +{
    + list_add_tail(&page->lru, &area->free_list[migratetype]);
    + area->nr_free++;
    +}
    +
    +/* Used for pages which are on another list */
    +static inline void move_to_free_area(struct page *page, struct free_area *area,
    + int migratetype)
    +{
    + list_move(&page->lru, &area->free_list[migratetype]);
    +}
    +
    +static inline struct page *get_page_from_free_area(struct free_area *area,
    + int migratetype)
    +{
    + return list_first_entry_or_null(&area->free_list[migratetype],
    + struct page, lru);
    +}
    +
    +static inline void rmv_page_order(struct page *page)
    +{
    + __ClearPageBuddy(page);
    + set_page_private(page, 0);
    +}
    +
    +static inline void del_page_from_free_area(struct page *page,
    + struct free_area *area, int migratetype)
    +{
    + list_del(&page->lru);
    + rmv_page_order(page);
    + area->nr_free--;
    +}
    +
    +static inline bool free_area_empty(struct free_area *area, int migratetype)
    +{
    + return list_empty(&area->free_list[migratetype]);
    +}
    +
    struct pglist_data;

    /*
    diff --git a/mm/compaction.c b/mm/compaction.c
    index 7c607479de4a..44adbfa073b3 100644
    --- a/mm/compaction.c
    +++ b/mm/compaction.c
    @@ -1359,13 +1359,13 @@ static enum compact_result __compact_finished(struct zone *zone,
    bool can_steal;

    /* Job done if page is free of the right migratetype */
    - if (!list_empty(&area->free_list[migratetype]))
    + if (!free_area_empty(area, migratetype))
    return COMPACT_SUCCESS;

    #ifdef CONFIG_CMA
    /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
    if (migratetype == MIGRATE_MOVABLE &&
    - !list_empty(&area->free_list[MIGRATE_CMA]))
    + !free_area_empty(area, MIGRATE_CMA))
    return COMPACT_SUCCESS;
    #endif
    /*
    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    index a51031cf16fe..d2f7b050bc13 100644
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
    @@ -704,12 +704,6 @@ static inline void set_page_order(struct page *page, unsigned int order)
    __SetPageBuddy(page);
    }

    -static inline void rmv_page_order(struct page *page)
    -{
    - __ClearPageBuddy(page);
    - set_page_private(page, 0);
    -}
    -
    /*
    * This function checks whether a page is free && is the buddy
    * we can coalesce a page and its buddy if
    @@ -810,13 +804,11 @@ static inline void __free_one_page(struct page *page,
    * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
    * merge with it and move up one order.
    */
    - if (page_is_guard(buddy)) {
    + if (page_is_guard(buddy))
    clear_page_guard(zone, buddy, order, migratetype);
    - } else {
    - list_del(&buddy->lru);
    - zone->free_area[order].nr_free--;
    - rmv_page_order(buddy);
    - }
    + else
    + del_page_from_free_area(buddy, &zone->free_area[order],
    + migratetype);
    combined_pfn = buddy_pfn & pfn;
    page = page + (combined_pfn - pfn);
    pfn = combined_pfn;
    @@ -866,15 +858,13 @@ static inline void __free_one_page(struct page *page,
    higher_buddy = higher_page + (buddy_pfn - combined_pfn);
    if (pfn_valid_within(buddy_pfn) &&
    page_is_buddy(higher_page, higher_buddy, order + 1)) {
    - list_add_tail(&page->lru,
    - &zone->free_area[order].free_list[migratetype]);
    - goto out;
    + add_to_free_area_tail(page, &zone->free_area[order],
    + migratetype);
    + return;
    }
    }

    - list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
    -out:
    - zone->free_area[order].nr_free++;
    + add_to_free_area(page, &zone->free_area[order], migratetype);
    }

    /*
    @@ -1819,7 +1809,6 @@ static inline void expand(struct zone *zone, struct page *page,
    if (set_page_guard(zone, &page[size], high, migratetype))
    continue;

    - list_add(&page[size].lru, &area->free_list[migratetype]);
    - area->nr_free++;
    + add_to_free_area(&page[size], area, migratetype);
    set_page_order(&page[size], high);
    }
    @@ -1961,13 +1951,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
    /* Find a page of the appropriate size in the preferred list */
    for (current_order = order; current_order < MAX_ORDER; ++current_order) {
    area = &(zone->free_area[current_order]);
    - page = list_first_entry_or_null(&area->free_list[migratetype],
    - struct page, lru);
    + page = get_page_from_free_area(area, migratetype);
    if (!page)
    continue;
    - list_del(&page->lru);
    - rmv_page_order(page);
    - area->nr_free--;
    + del_page_from_free_area(page, area, migratetype);
    expand(zone, page, order, current_order, area, migratetype);
    set_pcppage_migratetype(page, migratetype);
    return page;
    @@ -2053,8 +2040,7 @@ static int move_freepages(struct zone *zone,
    }

    order = page_order(page);
    - list_move(&page->lru,
    - &zone->free_area[order].free_list[migratetype]);
    + move_to_free_area(page, &zone->free_area[order], migratetype);
    page += 1 << order;
    pages_moved += 1 << order;
    }
    @@ -2206,7 +2192,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,

    single_page:
    area = &zone->free_area[current_order];
    - list_move(&page->lru, &area->free_list[start_type]);
    + move_to_free_area(page, area, start_type);
    }

    /*
    @@ -2230,7 +2216,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
    if (fallback_mt == MIGRATE_TYPES)
    break;

    - if (list_empty(&area->free_list[fallback_mt]))
    + if (free_area_empty(area, fallback_mt))
    continue;

    if (can_steal_fallback(order, migratetype))
    @@ -2317,9 +2303,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
    for (order = 0; order < MAX_ORDER; order++) {
    struct free_area *area = &(zone->free_area[order]);

    - page = list_first_entry_or_null(
    - &area->free_list[MIGRATE_HIGHATOMIC],
    - struct page, lru);
    + page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
    if (!page)
    continue;

    @@ -2432,8 +2416,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
    VM_BUG_ON(current_order == MAX_ORDER);

    do_steal:
    - page = list_first_entry(&area->free_list[fallback_mt],
    - struct page, lru);
    + page = get_page_from_free_area(area, fallback_mt);

    steal_suitable_fallback(zone, page, start_migratetype, can_steal);

    @@ -2860,6 +2843,7 @@ EXPORT_SYMBOL_GPL(split_page);

    int __isolate_free_page(struct page *page, unsigned int order)
    {
    + struct free_area *area = &page_zone(page)->free_area[order];
    unsigned long watermark;
    struct zone *zone;
    int mt;
    @@ -2884,9 +2868,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
    }

    /* Remove page from free list */
    - list_del(&page->lru);
    - zone->free_area[order].nr_free--;
    - rmv_page_order(page);
    +
    + del_page_from_free_area(page, area, mt);

    /*
    * Set the pageblock if the isolated page is at least half of a
    @@ -3180,13 +3163,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
    continue;

    for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
    - if (!list_empty(&area->free_list[mt]))
    + if (!free_area_empty(area, mt))
    return true;
    }

    #ifdef CONFIG_CMA
    if ((alloc_flags & ALLOC_CMA) &&
    - !list_empty(&area->free_list[MIGRATE_CMA])) {
    + !free_area_empty(area, MIGRATE_CMA)) {
    return true;
    }
    #endif
    @@ -5019,7 +5002,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)

    types[order] = 0;
    for (type = 0; type < MIGRATE_TYPES; type++) {
    - if (!list_empty(&area->free_list[type]))
    + if (!free_area_empty(area, type))
    types[order] |= 1 << type;
    }
    }
    @@ -8144,6 +8127,9 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
    spin_lock_irqsave(&zone->lock, flags);
    pfn = start_pfn;
    while (pfn < end_pfn) {
    + struct free_area *area;
    + int mt;
    +
    if (!pfn_valid(pfn)) {
    pfn++;
    continue;
    @@ -8162,13 +8148,13 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
    BUG_ON(page_count(page));
    BUG_ON(!PageBuddy(page));
    order = page_order(page);
    + area = &zone->free_area[order];
    #ifdef CONFIG_DEBUG_VM
    pr_info("remove from free list %lx %d %lx\n",
    pfn, 1 << order, end_pfn);
    #endif
    - list_del(&page->lru);
    - rmv_page_order(page);
    - zone->free_area[order].nr_free--;
    + mt = get_pageblock_migratetype(page);
    + del_page_from_free_area(page, area, mt);
    for (i = 0; i < (1 << order); i++)
    SetPageReserved((page+i));
    pfn += (1 << order);