Date: Sat, 28 Aug 2010
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [PATCH 14/20] ia64: Preemptible mmu_gather
    Fix up the ia64 mmu_gather code to conform to the new API.

    Cc: Tony Luck <tony.luck@intel.com>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    ---
arch/ia64/include/asm/tlb.h | 40 +++++++++++++++++++++++++---------------
1 file changed, 25 insertions(+), 15 deletions(-)

    Index: linux-2.6/arch/ia64/include/asm/tlb.h
    ===================================================================
    --- linux-2.6.orig/arch/ia64/include/asm/tlb.h
    +++ linux-2.6/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,21 @@
 #include <asm/machvec.h>
 
 #ifdef CONFIG_SMP
-# define FREE_PTE_NR		2048
 # define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
 #else
-# define FREE_PTE_NR		0
 # define tlb_fast_mode(tlb)	(1)
 #endif
 
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		nr;	/* == ~0U => fast mode */
+	unsigned int		max;
 	unsigned char		fullmm;	/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
-	struct page		*pages[FREE_PTE_NR];
+	struct page		**pages;
+	struct page		*local[8];
 };
 
 struct ia64_tr_entry {
@@ -90,9 +90,6 @@ extern struct ia64_tr_entry *ia64_idtrs[
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * Flush the TLB for address range START to END and, if not in fast mode, release the
  * freed pages that where gathered up to this point.
@@ -147,15 +144,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *t
 	}
 }
 
-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_pages(struct mmu_gather *tlb)
+{
+	unsigned long addr = __get_free_pages(GFP_ATOMIC, 0);
+
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+
+static inline void
+tlb_gather_mmu (struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	/*
 	 * Use fast mode if only 1 CPU is online.
 	 *
@@ -191,7 +196,8 @@ tlb_finish_mmu (struct mmu_gather *tlb,
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
@@ -208,8 +214,12 @@ tlb_remove_page (struct mmu_gather *tlb,
 		free_page_and_swap_cache(page);
 		return;
 	}
+
+	if (!tlb->nr && tlb->pages == tlb->local)
+		__tlb_alloc_pages(tlb);
+
 	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
+	if (tlb->nr >= tlb->max)
 		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
 }
 
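A note on the batching scheme the diff converts ia64 to: the gather structure is now supplied by the caller, starts out using the small embedded local[8] array, and on the first queued page tries to upgrade to a full page worth of pointers via a GFP_ATOMIC allocation; if that allocation fails nothing breaks, the batch simply stays at eight entries and ia64_tlb_flush_mmu() runs more often. The following standalone C program is a minimal userspace sketch of that pattern, not kernel code: struct gather, the gather_*() helpers, release_page() and MODEL_PAGE_SIZE are invented stand-ins, with malloc() playing the role of __get_free_pages() and a printf() standing in for the actual page freeing and TLB flush.

/*
 * Minimal userspace model of the batching scheme above; a sketch only.
 * malloc() stands in for __get_free_pages(), MODEL_PAGE_SIZE for
 * PAGE_SIZE, and release_page() for free_page_and_swap_cache() plus
 * the TLB flush.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096

struct page;				/* opaque token, like struct page */

struct gather {
	unsigned int nr;		/* pages queued so far */
	unsigned int max;		/* capacity of ->pages */
	struct page **pages;		/* current batch array */
	struct page *local[8];		/* fallback if allocation fails */
};

static void release_page(struct page *p)
{
	printf("releasing page %p\n", (void *)p);
}

static void gather_init(struct gather *g)
{
	/* Start out pointing at the small embedded array. */
	g->nr = 0;
	g->max = sizeof(g->local) / sizeof(g->local[0]);
	g->pages = g->local;
}

/* Try to upgrade to a full "page" of pointers; harmless if it fails. */
static void gather_alloc_pages(struct gather *g)
{
	void *addr = malloc(MODEL_PAGE_SIZE);

	if (addr) {
		g->pages = addr;
		g->max = MODEL_PAGE_SIZE / sizeof(void *);
	}
}

/* Release everything queued so far (the analogue of ia64_tlb_flush_mmu). */
static void gather_flush(struct gather *g)
{
	unsigned int i;

	for (i = 0; i < g->nr; i++)
		release_page(g->pages[i]);
	g->nr = 0;
}

/* Queue one page, growing the array lazily and flushing when full. */
static void gather_remove_page(struct gather *g, struct page *p)
{
	if (!g->nr && g->pages == g->local)
		gather_alloc_pages(g);

	g->pages[g->nr++] = p;
	if (g->nr >= g->max)
		gather_flush(g);
}

static void gather_finish(struct gather *g)
{
	gather_flush(g);
	if (g->pages != g->local)
		free(g->pages);
}

int main(void)
{
	struct gather g;		/* caller-provided, typically on the stack */
	long i;

	gather_init(&g);
	for (i = 1; i <= 20; i++)
		gather_remove_page(&g, (struct page *)i);
	gather_finish(&g);
	return 0;
}

The same shape is what makes the reworked mmu_gather preemptible: since the structure lives on the caller's stack rather than in the per-CPU mmu_gathers variable, and its backing array is either the embedded one or explicitly allocated and released in tlb_finish_mmu(), nothing per-CPU has to stay pinned while pages are being gathered.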



