    Subject: Re: [PATCH 09/10] vmscan: Setup pagevec as late as possible in shrink_page_list()
    On Fri, Apr 16, 2010 at 04:54:03PM +0900, KOSAKI Motohiro wrote:
    > > shrink_page_list() sets up a pagevec to release pages as they are
    > > freed. It uses a significant amount of stack for the pagevec. Instead
    > > of using the pagevec, this patch adds pages to be freed to a linked
    > > list which is then freed en-masse at the end. This avoids using stack
    > > in the main path that potentially calls writepage().
    > >
    > > Signed-off-by: Mel Gorman <mel@csn.ul.ie>
    > > ---
    > > mm/vmscan.c | 34 ++++++++++++++++++++++++++--------
    > > 1 files changed, 26 insertions(+), 8 deletions(-)
    > >
    > > diff --git a/mm/vmscan.c b/mm/vmscan.c
    > > index 9bc1ede..2c22c83 100644
    > > --- a/mm/vmscan.c
    > > +++ b/mm/vmscan.c
    > > @@ -619,6 +619,22 @@ static enum page_references page_check_references(struct page *page,
    > > return PAGEREF_RECLAIM;
    > > }
    > >
    > > +static void free_page_list(struct list_head *free_list)
    > > +{
    > > + struct pagevec freed_pvec;
    > > + struct page *page, *tmp;
    > > +
    > > + pagevec_init(&freed_pvec, 1);
    > > +
    > > + list_for_each_entry_safe(page, tmp, free_list, lru) {
    > > + list_del(&page->lru);
    > > + if (!pagevec_add(&freed_pvec, page)) {
    > > + __pagevec_free(&freed_pvec);
    > > + pagevec_reinit(&freed_pvec);
    > > + }
    > > + }
    >
    > Don't we need these two lines here? We have to consider that the
    > number of list elements may not be a multiple of 14 (14xN).
    >
    > if (pagevec_count(&freed_pvec))
    >         __pagevec_free(&freed_pvec);
    >

    Whoops, yes indeed. Otherwise this potentially leaks pages and, as
    SWAP_CLUSTER_MAX is 32, the count is often not going to be a multiple of 14.
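
    For reference, a sketch of how the helper looks with that drain folded
    in (the code above plus your two suggested lines; the respun patch may
    differ slightly):

        static void free_page_list(struct list_head *free_list)
        {
                struct pagevec freed_pvec;
                struct page *page, *tmp;

                pagevec_init(&freed_pvec, 1);

                list_for_each_entry_safe(page, tmp, free_list, lru) {
                        list_del(&page->lru);
                        /* Free a full batch each time the pagevec fills up */
                        if (!pagevec_add(&freed_pvec, page)) {
                                __pagevec_free(&freed_pvec);
                                pagevec_reinit(&freed_pvec);
                        }
                }

                /* Drain the 0-13 pages left when the count is not 14xN */
                if (pagevec_count(&freed_pvec))
                        __pagevec_free(&freed_pvec);
        }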

    >
    > > +}
    > > +
    > > /*
    > > * shrink_page_list() returns the number of reclaimed pages
    > > */
    > > @@ -627,13 +643,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
    > > enum pageout_io sync_writeback)
    > > {
    > > LIST_HEAD(ret_pages);
    > > - struct pagevec freed_pvec;
    > > + LIST_HEAD(free_list);
    > > int pgactivate = 0;
    > > unsigned long nr_reclaimed = 0;
    > >
    > > cond_resched();
    > >
    > > - pagevec_init(&freed_pvec, 1);
    > > while (!list_empty(page_list)) {
    > > enum page_references references;
    > > struct address_space *mapping;
    > > @@ -808,10 +823,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
    > > __clear_page_locked(page);
    > > free_it:
    > > nr_reclaimed++;
    > > - if (!pagevec_add(&freed_pvec, page)) {
    > > - __pagevec_free(&freed_pvec);
    > > - pagevec_reinit(&freed_pvec);
    > > - }
    > > +
    > > + /*
    > > + * Is there a need to call free_page_list() periodically? It would
    > > + * appear not, as the counts should be low
    > > + */
    > > + list_add(&page->lru, &free_list);
    > > continue;
    > >
    > > cull_mlocked:
    > > @@ -834,9 +851,10 @@ keep:
    > > list_add(&page->lru, &ret_pages);
    > > VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
    > > }
    > > +
    > > + free_page_list(&free_list);
    > > +
    > > list_splice(&ret_pages, page_list);
    > > - if (pagevec_count(&freed_pvec))
    > > - __pagevec_free(&freed_pvec);
    > > count_vm_events(PGACTIVATE, pgactivate);
    > > return nr_reclaimed;
    > > }
    > > --
    > > 1.6.5
    > >
    >
    >
    >

    --
    Mel Gorman
    Part-time Phd Student                          Linux Technology Center
    University of Limerick                         IBM Dublin Software Lab

