Subject: [PATCH v4 08/13] numa_maps: remove numa_maps->vma
pagewalk.c can handle vma itself, so we don't have to pass the vma via
walk->private. And since show_numa_map() walks pages on a per-vma basis,
using walk_page_vma() is preferable.
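
In other words, the walk core now sets walk->vma before invoking each
callback, so gather_pte_stats() reads the vma from the walk itself
instead of from md->vma. Below is a minimal userspace sketch of that
pattern, not the kernel implementation; all *_sim names are invented
for illustration.

#include <stdio.h>

struct vm_area_struct { unsigned long vm_start, vm_end; };

struct mm_walk_sim {
	/* set by the walker core for each callback, like walk->vma */
	struct vm_area_struct *vma;
	/* caller-owned cookie, like walk->private */
	void *private;
	int (*pmd_entry)(unsigned long addr, struct mm_walk_sim *walk);
};

struct numa_maps_sim { unsigned long pages; };

/* The callback gets the vma from the walk itself, not from md->vma. */
static int gather_stats_sim(unsigned long addr, struct mm_walk_sim *walk)
{
	struct numa_maps_sim *md = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (addr >= vma->vm_start && addr < vma->vm_end)
		md->pages++;
	return 0;
}

/* Stand-in for walk_page_vma(): the core publishes walk->vma, then walks. */
static int walk_page_vma_sim(struct vm_area_struct *vma,
			     struct mm_walk_sim *walk)
{
	unsigned long addr;
	int ret;

	walk->vma = vma;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += 0x1000) {
		ret = walk->pmd_entry(addr, walk);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct vm_area_struct vma = { .vm_start = 0x1000, .vm_end = 0x5000 };
	struct numa_maps_sim md = { 0 };
	struct mm_walk_sim walk = {
		.pmd_entry = gather_stats_sim,
		.private = &md,
	};

	walk_page_vma_sim(&vma, &walk);
	printf("pages walked: %lu\n", md.pages);	/* prints 4 */
	return 0;
}

In the real API, walk_page_vma() fills in walk->vma for the callbacks
(the caller still holds mmap_sem, as the comment added in the last hunk
notes), which is what makes the numa_maps->vma copy redundant.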

ChangeLog v4:
- remove redundant vma

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 fs/proc/task_mmu.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git v3.16-rc3.orig/fs/proc/task_mmu.c v3.16-rc3/fs/proc/task_mmu.c
index 0d3d1ac32b2e..4ca28f401bb1 100644
--- v3.16-rc3.orig/fs/proc/task_mmu.c
+++ v3.16-rc3/fs/proc/task_mmu.c
@@ -1245,7 +1245,6 @@ const struct file_operations proc_pagemap_operations = {
 #ifdef CONFIG_NUMA
 
 struct numa_maps {
-	struct vm_area_struct *vma;
 	unsigned long pages;
 	unsigned long anon;
 	unsigned long active;
@@ -1314,18 +1313,17 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		unsigned long end, struct mm_walk *walk)
 {
-	struct numa_maps *md;
+	struct numa_maps *md = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	spinlock_t *ptl;
 	pte_t *orig_pte;
 	pte_t *pte;
 
-	md = walk->private;
-
-	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
-		page = can_gather_numa_stats(huge_pte, md->vma, addr);
+		page = can_gather_numa_stats(huge_pte, vma, addr);
 		if (page)
 			gather_stats(page, md, pte_dirty(huge_pte),
 				HPAGE_PMD_SIZE/PAGE_SIZE);
@@ -1337,7 +1335,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		return 0;
 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	do {
-		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
+		struct page *page = can_gather_numa_stats(*pte, vma, addr);
 		if (!page)
 			continue;
 		gather_stats(page, md, pte_dirty(*pte), 1);
@@ -1385,7 +1383,12 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	struct file *file = vma->vm_file;
 	struct task_struct *task = proc_priv->task;
 	struct mm_struct *mm = vma->vm_mm;
-	struct mm_walk walk = {};
+	struct mm_walk walk = {
+		.hugetlb_entry = gather_hugetlb_stats,
+		.pmd_entry = gather_pte_stats,
+		.private = md,
+		.mm = mm,
+	};
 	struct mempolicy *pol;
 	char buffer[64];
 	int nid;
@@ -1396,13 +1399,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	/* Ensure we start with an empty set of numa_maps statistics. */
 	memset(md, 0, sizeof(*md));
 
-	md->vma = vma;
-
-	walk.hugetlb_entry = gather_hugetlb_stats;
-	walk.pmd_entry = gather_pte_stats;
-	walk.private = md;
-	walk.mm = mm;
-
 	pol = get_vma_policy(task, vma, vma->vm_start);
 	mpol_to_str(buffer, sizeof(buffer), pol);
 	mpol_cond_put(pol);
@@ -1432,7 +1428,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
 	if (is_vm_hugetlb_page(vma))
 		seq_puts(m, " huge");
 
-	walk_page_range(vma->vm_start, vma->vm_end, &walk);
+	/* mmap_sem is held by m_start */
+	walk_page_vma(vma, &walk);
 
 	if (!md->pages)
 		goto out;
-- 
1.9.3

