Subject: [patch 2/12] mm: pagealloc opt
Date: 21 Nov 2005
Slightly optimise some page allocation and freeing functions by taking
advantage of the caller knowing whether or not interrupts are already
disabled: free_pages_bulk() and rmqueue_bulk() now take the zone lock with
plain spin_lock()/spin_unlock() rather than the irqsave variants, and the
callers that may be entered with interrupts enabled (__free_pages_ok() and
__drain_pages()) disable interrupts explicitly around the bulk call with
local_irq_save()/local_irq_restore().  buffered_rmqueue(), which already
calls rmqueue_bulk() with interrupts disabled, needs no extra work and
additionally marks the common "per-cpu list is non-empty" case with likely().

Signed-off-by: Nick Piggin <npiggin@suse.de>
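
For reference, here is a minimal sketch of the locking convention the patch
moves to.  It is not taken from page_alloc.c: demo_pool, demo_free_bulk() and
demo_free_one() are invented names, and only the locking and list primitives
(spin_lock(), local_irq_save(), list_splice_init(), ...) are real kernel APIs.
The bulk helper relies on its caller having interrupts disabled and so takes
its lock with the cheaper spin_lock()/spin_unlock(); a caller that may be
entered with interrupts enabled brackets the call with
local_irq_save()/local_irq_restore().

/*
 * Sketch only: demo_pool, demo_free_bulk() and demo_free_one() are made-up
 * names, not code from this patch.  Assume the pool was set up elsewhere
 * with spin_lock_init(&pool->lock) and INIT_LIST_HEAD(&pool->pages).
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_pool {
        spinlock_t lock;                /* protects @pages */
        struct list_head pages;
};

/*
 * Like free_pages_bulk() after this patch: the caller guarantees that
 * interrupts are already disabled, so a plain spin_lock() is enough and
 * no interrupt flags have to be saved or restored here.
 */
static void demo_free_bulk(struct demo_pool *pool, struct list_head *list)
{
        spin_lock(&pool->lock);
        list_splice_init(list, &pool->pages);
        spin_unlock(&pool->lock);
}

/*
 * Like __free_pages_ok() after this patch: this path may be entered with
 * interrupts enabled, so it disables them itself, just around the bulk call.
 */
static void demo_free_one(struct demo_pool *pool, struct list_head *entry)
{
        unsigned long flags;
        LIST_HEAD(list);

        list_add(entry, &list);
        local_irq_save(flags);
        demo_free_bulk(pool, &list);
        local_irq_restore(flags);
}

The point of the split is that spin_lock_irqsave() saves and restores the
interrupt flags on every call, even on paths that already run with interrupts
off (as rmqueue_bulk()'s caller in buffered_rmqueue() does); pushing the
local_irq_save() out to the few callers that actually need it removes that
redundant work from the common case.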

Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c
+++ linux-2.6/mm/page_alloc.c
@@ -375,11 +375,10 @@ static int
 free_pages_bulk(struct zone *zone, int count,
                 struct list_head *list, unsigned int order)
 {
-        unsigned long flags;
         struct page *page = NULL;
         int ret = 0;
 
-        spin_lock_irqsave(&zone->lock, flags);
+        spin_lock(&zone->lock);
         zone->all_unreclaimable = 0;
         zone->pages_scanned = 0;
         while (!list_empty(list) && count--) {
@@ -389,12 +388,13 @@ free_pages_bulk(struct zone *zone, int c
                 __free_pages_bulk(page, zone, order);
                 ret++;
         }
-        spin_unlock_irqrestore(&zone->lock, flags);
+        spin_unlock(&zone->lock);
         return ret;
 }
 
 void __free_pages_ok(struct page *page, unsigned int order)
 {
+        unsigned long flags;
         LIST_HEAD(list);
         int i;
 
@@ -412,7 +412,9 @@ void __free_pages_ok(struct page *page,
                 free_pages_check(__FUNCTION__, page + i);
         list_add(&page->lru, &list);
         kernel_map_pages(page, 1<<order, 0);
+        local_irq_save(flags);
         free_pages_bulk(page_zone(page), 1, &list, order);
+        local_irq_restore(flags);
 }
 
 
@@ -528,12 +530,11 @@ static struct page *__rmqueue(struct zon
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
                         unsigned long count, struct list_head *list)
 {
-        unsigned long flags;
         int i;
         int allocated = 0;
         struct page *page;
 
-        spin_lock_irqsave(&zone->lock, flags);
+        spin_lock(&zone->lock);
         for (i = 0; i < count; ++i) {
                 page = __rmqueue(zone, order);
                 if (page == NULL)
@@ -541,7 +542,7 @@ static int rmqueue_bulk(struct zone *zon
                 allocated++;
                 list_add_tail(&page->lru, list);
         }
-        spin_unlock_irqrestore(&zone->lock, flags);
+        spin_unlock(&zone->lock);
         return allocated;
 }
 
@@ -578,6 +579,7 @@ void drain_remote_pages(void)
 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
+        unsigned long flags;
         struct zone *zone;
         int i;
 
@@ -589,8 +591,10 @@ static void __drain_pages(unsigned int c
                         struct per_cpu_pages *pcp;
 
                         pcp = &pset->pcp[i];
+                        local_irq_save(flags);
                         pcp->count -= free_pages_bulk(zone, pcp->count,
                                                 &pcp->list, 0);
+                        local_irq_restore(flags);
                 }
         }
 }
@@ -728,7 +732,7 @@ buffered_rmqueue(struct zone *zone, int
         if (pcp->count <= pcp->low)
                 pcp->count += rmqueue_bulk(zone, 0,
                                         pcp->batch, &pcp->list);
-        if (pcp->count) {
+        if (likely(pcp->count)) {
                 page = list_entry(pcp->list.next, struct page, lru);
                 list_del(&page->lru);
                 pcp->count--;