    Date: 2011-02-03
    Subject: Re: [RFC][PATCH 3/6] break out smaps_pte_entry() from smaps_pte_range()
    On Mon, 31 Jan 2011, Dave Hansen wrote:

    >
    > We will use smaps_pte_entry() in a moment to handle both small
    > and transparent large pages. But we must break it out of
    > smaps_pte_range() first.
    >

    The extraction from smaps_pte_range() looks good. What's the performance
    impact on very frequent consumers of /proc/pid/smaps, though, from the
    extra function call on every pte in the walk if smaps_pte_entry() doesn't
    get inlined (presumably it won't be, since you'll be reusing the
    extracted function elsewhere)?
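
    If you want to quantify it, a throwaway harness along these lines is
    what I'd use: time repeated full reads of /proc/self/smaps before and
    after the series. This is only a sketch, not a rigorous benchmark; the
    pass count and buffer size are arbitrary, and it times the whole read
    path rather than the pte walk alone:

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define PASSES	1000

	int main(void)
	{
		struct timespec t0, t1;
		char buf[4096];
		long long ns;
		int i, fd;

		clock_gettime(CLOCK_MONOTONIC, &t0);
		for (i = 0; i < PASSES; i++) {
			/* one full open/read/close pass over smaps */
			fd = open("/proc/self/smaps", O_RDONLY);
			if (fd < 0) {
				perror("open");
				return 1;
			}
			while (read(fd, buf, sizeof(buf)) > 0)
				;
			close(fd);
		}
		clock_gettime(CLOCK_MONOTONIC, &t1);

		ns = (t1.tv_sec - t0.tv_sec) * 1000000000LL +
		     (t1.tv_nsec - t0.tv_nsec);
		printf("%lld ns per pass\n", ns / PASSES);
		return 0;
	}

    (Build with gcc -O2; older glibc needs -lrt for clock_gettime().)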

    >
    > ---
    >
    > linux-2.6.git-dave/fs/proc/task_mmu.c | 85 ++++++++++++++++++----------------
    > 1 file changed, 46 insertions(+), 39 deletions(-)
    >
    > diff -puN fs/proc/task_mmu.c~break-out-smaps_pte_entry fs/proc/task_mmu.c
    > --- linux-2.6.git/fs/proc/task_mmu.c~break-out-smaps_pte_entry 2011-01-27 11:03:06.761548697 -0800
    > +++ linux-2.6.git-dave/fs/proc/task_mmu.c 2011-01-27 11:03:06.773548685 -0800
    > @@ -333,56 +333,63 @@ struct mem_size_stats {
    > 	u64 pss;
    > };
    >
    > -static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
    > -			   struct mm_walk *walk)
    > +
    > +static void smaps_pte_entry(pte_t ptent, unsigned long addr,
    > +		struct mm_walk *walk)
    > {
    > 	struct mem_size_stats *mss = walk->private;
    > 	struct vm_area_struct *vma = mss->vma;
    > -	pte_t *pte, ptent;
    > -	spinlock_t *ptl;
    > 	struct page *page;
    > 	int mapcount;
    >
    > -	split_huge_page_pmd(walk->mm, pmd);
    > -
    > -	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    > -	for (; addr != end; pte++, addr += PAGE_SIZE) {
    > -		ptent = *pte;
    > +	if (is_swap_pte(ptent)) {
    > +		mss->swap += PAGE_SIZE;
    > +		return;
    > +	}
    >
    > -		if (is_swap_pte(ptent)) {
    > -			mss->swap += PAGE_SIZE;
    > -			continue;
    > -		}
    > +	if (!pte_present(ptent))
    > +		return;
    >
    > -		if (!pte_present(ptent))
    > -			continue;
    > +	page = vm_normal_page(vma, addr, ptent);
    > +	if (!page)
    > +		return;
    > +
    > +	if (PageAnon(page))
    > +		mss->anonymous += PAGE_SIZE;
    > +
    > +	mss->resident += PAGE_SIZE;
    > +	/* Accumulate the size in pages that have been accessed. */
    > +	if (pte_young(ptent) || PageReferenced(page))
    > +		mss->referenced += PAGE_SIZE;
    > +	mapcount = page_mapcount(page);
    > +	if (mapcount >= 2) {
    > +		if (pte_dirty(ptent) || PageDirty(page))
    > +			mss->shared_dirty += PAGE_SIZE;
    > +		else
    > +			mss->shared_clean += PAGE_SIZE;
    > +		mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
    > +	} else {
    > +		if (pte_dirty(ptent) || PageDirty(page))
    > +			mss->private_dirty += PAGE_SIZE;
    > +		else
    > +			mss->private_clean += PAGE_SIZE;
    > +		mss->pss += (PAGE_SIZE << PSS_SHIFT);
    > +	}
    > +}
    >
    > -		page = vm_normal_page(vma, addr, ptent);
    > -		if (!page)
    > -			continue;
    > +static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
    > +			   struct mm_walk *walk)
    > +{
    > +	struct mem_size_stats *mss = walk->private;
    > +	struct vm_area_struct *vma = mss->vma;
    > +	pte_t *pte;
    > +	spinlock_t *ptl;
    >
    > -		if (PageAnon(page))
    > -			mss->anonymous += PAGE_SIZE;
    > +	split_huge_page_pmd(walk->mm, pmd);
    >
    > -		mss->resident += PAGE_SIZE;
    > -		/* Accumulate the size in pages that have been accessed. */
    > -		if (pte_young(ptent) || PageReferenced(page))
    > -			mss->referenced += PAGE_SIZE;
    > -		mapcount = page_mapcount(page);
    > -		if (mapcount >= 2) {
    > -			if (pte_dirty(ptent) || PageDirty(page))
    > -				mss->shared_dirty += PAGE_SIZE;
    > -			else
    > -				mss->shared_clean += PAGE_SIZE;
    > -			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
    > -		} else {
    > -			if (pte_dirty(ptent) || PageDirty(page))
    > -				mss->private_dirty += PAGE_SIZE;
    > -			else
    > -				mss->private_clean += PAGE_SIZE;
    > -			mss->pss += (PAGE_SIZE << PSS_SHIFT);
    > -		}
    > -	}
    > +	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    > +	for (; addr != end; pte++, addr += PAGE_SIZE)
    > +		smaps_pte_entry(*pte, addr, walk);
    > 	pte_unmap_unlock(pte - 1, ptl);
    > 	cond_resched();
    > 	return 0;
    > diff -puN mm/huge_memory.c~break-out-smaps_pte_entry mm/huge_memory.c
    > _

    Is there a missing change to mm/huge_memory.c? The diff header for it at
    the end has no hunks following it.

