Date: Thu, 25 Jul 2013
Subject: Re: [PATCH 1/8] migrate: make core migration code aware of hugepage
From: Hillf Danton <dhillf@gmail.com>
On Thu, Jul 25, 2013 at 12:54 PM, Naoya Horiguchi
<n-horiguchi@ah.jp.nec.com> wrote:
> Before enabling each user of page migration to support hugepages,
> this patch allows the list of pages for migration to link not only
> LRU pages but also hugepages. As a result, putback_movable_pages()
> and migrate_pages() can handle both LRU pages and hugepages.
>
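A minimal sketch of what this enables for a caller, mirroring how the
later patches in this series use it. alloc_hugepage_node() is an
invented new_page_t callback for illustration (not part of the patch),
hpage and nid come from the caller's context, and the MR_SYSCALL
reason is an arbitrary choice:

	/* invented callback: allocate the replacement hugepage on node `nid' */
	static struct page *alloc_hugepage_node(struct page *page,
						unsigned long nid, int **x)
	{
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    (int)nid);
	}

	LIST_HEAD(pagelist);
	int ret;

	if (!isolate_huge_page(hpage, &pagelist))	/* takes a page ref */
		return -EBUSY;

	ret = migrate_pages(&pagelist, alloc_hugepage_node,
			    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL);
	if (ret)
		/* failure: survivors go back to hugepage_activelist */
		putback_movable_pages(&pagelist);
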
> ChangeLog v4:
> - make the no-op stub macros expand to 'do {} while (0)'
> - use more readable variable name
>
> ChangeLog v3:
> - revert introducing migrate_movable_pages
> - add isolate_huge_page
>
> ChangeLog v2:
> - move code removing VM_HUGETLB from vma_migratable check into a
> separate patch
> - hold hugetlb_lock in putback_active_hugepage
> - update comment near the definition of hugetlb_lock
>
> Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
> Acked-by: Andi Kleen <ak@linux.intel.com>
> Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
> ---
Acked-by: Hillf Danton <dhillf@gmail.com>

> include/linux/hugetlb.h |  6 ++++++
> mm/hugetlb.c            | 32 +++++++++++++++++++++++++++++++-
> mm/migrate.c            | 10 +++++++++-
> 3 files changed, 46 insertions(+), 2 deletions(-)
>
> diff --git v3.11-rc1.orig/include/linux/hugetlb.h v3.11-rc1/include/linux/hugetlb.h
> index c2b1801..c7a14a4 100644
> --- v3.11-rc1.orig/include/linux/hugetlb.h
> +++ v3.11-rc1/include/linux/hugetlb.h
> @@ -66,6 +66,9 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
>  						vm_flags_t vm_flags);
>  void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
>  int dequeue_hwpoisoned_huge_page(struct page *page);
> +bool isolate_huge_page(struct page *page, struct list_head *list);
> +void putback_active_hugepage(struct page *page);
> +void putback_active_hugepages(struct list_head *list);
>  void copy_huge_page(struct page *dst, struct page *src);
>
>  #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
> @@ -134,6 +137,9 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
>  	return 0;
>  }
>
> +#define isolate_huge_page(p, l) false
> +#define putback_active_hugepage(p) do {} while (0)
> +#define putback_active_hugepages(l) do {} while (0)
>  static inline void copy_huge_page(struct page *dst, struct page *src)
>  {
>  }
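
On the v4 note above: the do {} while (0) form matters because a stub
that expands to nothing leaves a bare ";" when used as the body of an
if, which trips gcc's -Wempty-body (and reads like an accident); the
do {} while (0) body keeps the call a single ordinary statement. A
contrived !CONFIG_HUGETLB_PAGE caller, just for illustration:

	if (PageHuge(page))
		putback_active_hugepage(page);	/* do {} while (0); -- no warning */
	else
		putback_lru_page(page);
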
> diff --git v3.11-rc1.orig/mm/hugetlb.c v3.11-rc1/mm/hugetlb.c
> index 83aff0a..506d195 100644
> --- v3.11-rc1.orig/mm/hugetlb.c
> +++ v3.11-rc1/mm/hugetlb.c
> @@ -48,7 +48,8 @@ static unsigned long __initdata default_hstate_max_huge_pages;
>  static unsigned long __initdata default_hstate_size;
>
>  /*
> - * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
> + * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
> + * free_huge_pages, and surplus_huge_pages.
>   */
>  DEFINE_SPINLOCK(hugetlb_lock);
> @@ -3431,3 +3432,32 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
>  	return ret;
>  }
>  #endif
> +
> +bool isolate_huge_page(struct page *page, struct list_head *list)
> +{
> +	VM_BUG_ON(!PageHead(page));
> +	if (!get_page_unless_zero(page))
> +		return false;
> +	spin_lock(&hugetlb_lock);
> +	list_move_tail(&page->lru, list);
> +	spin_unlock(&hugetlb_lock);
> +	return true;
> +}
> +
> +void putback_active_hugepage(struct page *page)
> +{
> +	VM_BUG_ON(!PageHead(page));
> +	spin_lock(&hugetlb_lock);
> +	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
> +	spin_unlock(&hugetlb_lock);
> +	put_page(page);
> +}
> +
> +void putback_active_hugepages(struct list_head *list)
> +{
> +	struct page *page;
> +	struct page *page2;
> +
> +	list_for_each_entry_safe(page, page2, list, lru)
> +		putback_active_hugepage(page);
> +}
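
One thing worth spelling out for callers: the two helpers form a
take-ref/drop-ref pair, and put_page() sits outside hugetlb_lock on
purpose, since the final put lands in free_huge_page(), which takes
hugetlb_lock itself. A reviewer's sketch of the contract (not code
from the patch):

	LIST_HEAD(pagelist);

	if (isolate_huge_page(hpage, &pagelist)) {
		/* hpage: refcount +1, moved off hugepage_activelist */

		/* ... migration attempt elided ... */

		putback_active_hugepages(&pagelist);
		/* hpage: back on its hstate's activelist, ref dropped */
	}
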
> diff --git v3.11-rc1.orig/mm/migrate.c v3.11-rc1/mm/migrate.c
> index 6f0c244..b44a067 100644
> --- v3.11-rc1.orig/mm/migrate.c
> +++ v3.11-rc1/mm/migrate.c
> @@ -100,6 +100,10 @@ void putback_movable_pages(struct list_head *l)
>  	struct page *page2;
>
>  	list_for_each_entry_safe(page, page2, l, lru) {
> +		if (unlikely(PageHuge(page))) {
> +			putback_active_hugepage(page);
> +			continue;
> +		}
>  		list_del(&page->lru);
>  		dec_zone_page_state(page, NR_ISOLATED_ANON +
>  				page_is_file_cache(page));
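
The early continue is needed because hugepages never went through
isolate_lru_page(): they live on hugepage_activelist rather than an
LRU list and are not accounted in NR_ISOLATED_*, so neither the
list_del() nor the dec_zone_page_state() above applies to them.
Side by side:

	/* LRU pages (existing pattern at isolation sites):
	 *	isolate_lru_page(page);
	 *	inc_zone_page_state(page, NR_ISOLATED_ANON +
	 *			    page_is_file_cache(page));
	 * hugepages (this series): isolate_huge_page() only --
	 * no NR_ISOLATED_* bump, so no matching decrement at putback.
	 */
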
> @@ -1025,7 +1029,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
>  		list_for_each_entry_safe(page, page2, from, lru) {
>  			cond_resched();
>
> -			rc = unmap_and_move(get_new_page, private,
> +			if (PageHuge(page))
> +				rc = unmap_and_move_huge_page(get_new_page,
> +						private, page, pass > 2, mode);
> +			else
> +				rc = unmap_and_move(get_new_page, private,
>  						page, pass > 2, mode);
>
>  			switch(rc) {
> --
> 1.8.3.1
>

