Subject: [PATCH 19/49] mm: migrate: Introduce migrate_misplaced_page()
From: Peter Zijlstra <a.p.zijlstra@chello.nl>

Note: This was originally based on Peter's patch "mm/migrate: Introduce
migrate_misplaced_page()" but borrows extremely heavily from Andrea's
"autonuma: memory follows CPU algorithm and task/mm_autonuma stats
collection". The end result is barely recognisable, so the signed-offs
had to be dropped. If the original authors are ok with it, I'll
re-add the signed-off-bys.

    Add migrate_misplaced_page() which deals with migrating pages from
    faults.
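
The calling convention is that the fault handler takes a reference on
the page, and migrate_misplaced_page() is expected to drop that
reference before returning, reporting 1 if the page was migrated and 0
otherwise. A minimal sketch of such a caller (illustrative only: the
real fault-path hook-up comes later in this series, and
numa_fault_sketch is a made-up name):

	static int numa_fault_sketch(struct page *page, int target_nid)
	{
		int migrated;

		/* take the reference that migrate_misplaced_page() consumes */
		get_page(page);
		migrated = migrate_misplaced_page(page, target_nid);

		return migrated; /* 1: page now on target_nid; 0: not migrated */
	}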

    Based-on-work-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
    Based-on-work-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Based-on-work-by: Andrea Arcangeli <aarcange@redhat.com>
    Signed-off-by: Mel Gorman <mgorman@suse.de>
    Reviewed-by: Rik van Riel <riel@redhat.com>
    ---
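For anyone wanting to see the effect in isolation, the sketch below is
the userspace analogue: it moves one page to an explicit node with
move_pages(2). This patch automates the kernel-internal equivalent when
a fault shows the page is misplaced. Illustration only, not part of the
patch; it assumes libnuma is installed (build with -lnuma) and that
node 0 exists.

/* Userspace analogue of migrating a misplaced page to a chosen node. */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	void *page;
	int target_node = 0;	/* like the 'node' argument in-kernel */
	int status = -1;	/* receives resulting node or a -errno */

	if (posix_memalign(&page, pagesize, pagesize))
		return 1;
	memset(page, 0, pagesize);	/* fault the page in first */

	/* Ask the kernel to migrate this one page to target_node. */
	if (move_pages(0 /* self */, 1, &page, &target_node, &status,
		       MPOL_MF_MOVE)) {
		perror("move_pages");
		return 1;
	}
	if (status < 0)
		fprintf(stderr, "page not moved: status %d\n", status);
	else
		printf("page is now on node %d\n", status);

	free(page);
	return 0;
}

The per-page status value mirrors what migrate_misplaced_page()'s
return value conveys in-kernel: whether the page ended up on the
requested node.
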
 include/linux/migrate.h |   11 +++++
 mm/migrate.c            |  111 ++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 9d1c159..2923135 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,6 +13,7 @@ enum migrate_reason {
 	MR_MEMORY_HOTPLUG,
 	MR_SYSCALL,		/* also applies to cpusets */
 	MR_MEMPOLICY_MBIND,
+	MR_NUMA_MISPLACED,
 	MR_CMA
 };
 
@@ -73,4 +74,14 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #define fail_migrate_page NULL
 
 #endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_BALANCE_NUMA
+extern int migrate_misplaced_page(struct page *page, int node);
+#else
+static inline int migrate_misplaced_page(struct page *page, int node)
+{
+	return -EAGAIN; /* can't migrate now */
+}
+#endif /* CONFIG_BALANCE_NUMA */
+
 #endif /* _LINUX_MIGRATE_H */
diff --git a/mm/migrate.c b/mm/migrate.c
index 27be9c9..a2c4567 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -282,7 +282,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		struct buffer_head *head, enum migrate_mode mode)
 {
-	int expected_count;
+	int expected_count = 0;
 	void **pslot;
 
 	if (!mapping) {
@@ -1415,4 +1415,111 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
 	}
 	return err;
 }
-#endif
+
+#ifdef CONFIG_BALANCE_NUMA
+/*
+ * Returns true if this is a safe migration target node for misplaced NUMA
+ * pages. Currently it only checks the watermarks, which is crude.
+ */
+static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
+				   int nr_migrate_pages)
+{
+	int z;
+	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+		struct zone *zone = pgdat->node_zones + z;
+
+		if (!populated_zone(zone))
+			continue;
+
+		if (zone->all_unreclaimable)
+			continue;
+
+		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
+		if (!zone_watermark_ok(zone, 0,
+				       high_wmark_pages(zone) +
+				       nr_migrate_pages,
+				       0, 0))
+			continue;
+		return true;
+	}
+	return false;
+}
+
+static struct page *alloc_misplaced_dst_page(struct page *page,
+					     unsigned long data,
+					     int **result)
+{
+	int nid = (int) data;
+	struct page *newpage;
+
+	newpage = alloc_pages_exact_node(nid,
+					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
+					  __GFP_NOMEMALLOC | __GFP_NORETRY |
+					  __GFP_NOWARN) &
+					 ~GFP_IOFS, 0);
+	return newpage;
+}
+
+/*
+ * Attempt to migrate a misplaced page to the specified destination
+ * node. Caller is expected to have an elevated reference count on
+ * the page that will be dropped by this function before returning.
+ */
+int migrate_misplaced_page(struct page *page, int node)
+{
+	int isolated = 0;
+	LIST_HEAD(migratepages);
+
+	/*
+	 * Don't migrate pages that are mapped in multiple processes.
+	 * TODO: Handle false sharing detection instead of this hammer
+	 */
+	if (page_mapcount(page) != 1) {
+		put_page(page);
+		goto out;
+	}
+
+	/* Avoid migrating to a node that is nearly full */
+	if (migrate_balanced_pgdat(NODE_DATA(node), 1)) {
+		int page_lru;
+
+		if (isolate_lru_page(page)) {
+			put_page(page);
+			goto out;
+		}
+		isolated = 1;
+
+		/*
+		 * Page is isolated which takes a reference count so now the
+		 * caller's reference can be safely dropped without the page
+		 * disappearing underneath us during migration
+		 */
+		put_page(page);
+
+		page_lru = page_is_file_cache(page);
+		inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
+		list_add(&page->lru, &migratepages);
+	} else {
+		/* Destination node is nearly full; drop the caller's pin */
+		put_page(page);
+	}
+
+	if (isolated) {
+		int nr_remaining;
+
+		nr_remaining = migrate_pages(&migratepages,
+					     alloc_misplaced_dst_page,
+					     node, false, MIGRATE_ASYNC,
+					     MR_NUMA_MISPLACED);
+		if (nr_remaining) {
+			putback_lru_pages(&migratepages);
+			isolated = 0;
+		}
+	}
+	BUG_ON(!list_empty(&migratepages));
+out:
+	return isolated;
+}
+#endif /* CONFIG_BALANCE_NUMA */
+
+#endif /* CONFIG_NUMA */
    --
    1.7.9.2

