Date:    2008-09-11
From:    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Subject: [RFC] [PATCH 9/9] memcg: percpu page cgroup lookup cache
Use a per-cpu cache for fast access to page_cgroup.
This patch makes the lookup fastpath faster.

Because page_cgroup is accessed when a page is allocated or freed,
we can assume that several contiguous page_cgroups will be accessed
soon afterwards. (Unless allocations are interleaved across NUMA
nodes; but in that case, alloc/free itself is slow anyway.)

We cache a small set of page_cgroup base pointers in a per-cpu area
and use a cached pointer whenever we hit.
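
To illustrate the idea outside of kernel context, here is a minimal
userspace sketch of the same direct-mapped scheme. All names in it
(CHUNK_SHIFT, SLOTS, lookup_slow()) are illustrative, not taken from
this patch; only the slot selection and hit test mirror the patch's
"index & PCG_CACHE_MASK" logic.

#include <stdio.h>

#define CHUNK_SHIFT     10              /* example: 1024 pfns per chunk */
#define SLOTS           32              /* like PCG_CACHE_MAX_SLOT */
#define SLOT_MASK       (SLOTS - 1)

struct slot {
        unsigned long index;
        char *base;
};
static struct slot cache[SLOTS];        /* per-cpu in the real patch */

/* stand-in for the hash-table walk done by __lookup_page_cgroup() */
static char *lookup_slow(unsigned long index)
{
        return (char *)(index << CHUNK_SHIFT);  /* fake base pointer */
}

static char *lookup(unsigned long pfn)
{
        unsigned long index = pfn >> CHUNK_SHIFT;  /* chunk of this pfn */
        unsigned int s = index & SLOT_MASK;        /* direct-mapped slot */

        if (cache[s].index != index) {             /* miss: refill slot */
                cache[s].base = lookup_slow(index);
                cache[s].index = index;
        }
        return cache[s].base + pfn;                /* hit: one compare */
}

int main(void)
{
        unsigned int i;

        /* invalidate all slots, as clear_page_cgroup_cache_pcg() does */
        for (i = 0; i < SLOTS; i++)
                cache[i].index = -1UL;

        printf("%p\n", (void *)lookup(4096));   /* miss: fills slot for chunk 4 */
        printf("%p\n", (void *)lookup(4097));   /* same chunk: hit */
        return 0;
}

On a hit the lookup costs one per-cpu load and one compare instead of
a hash walk, which is the point of the patch.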

    TODO:
- memory/cpu hotplug support (a possible shape for the CPU side is
  sketched below).
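
For the CPU half of that TODO, one possible shape would be a hotcpu
notifier that drops the dead CPU's slots. This is only a sketch, not
part of the posted patch, and it assumes the 2.6.27-era notifier API:

#include <linux/cpu.h>
#include <linux/notifier.h>

/* sketch only: invalidate a dead CPU's slots via the function
 * added by this patch, clear_page_cgroup_cache_pcg() */
static int __cpuinit pcg_cache_cpu_callback(struct notifier_block *nb,
                                            unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                clear_page_cgroup_cache_pcg(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block pcg_cache_cpu_notifier __cpuinitdata = {
        .notifier_call = pcg_cache_cpu_callback,
};

page_cgroup_init() would then call
register_hotcpu_notifier(&pcg_cache_cpu_notifier). The memory-hotplug
half is harder, since slots pointing into a removed chunk must be
invalidated on every CPU, not just one.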

    Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>

    ---
    mm/page_cgroup.c | 47 +++++++++++++++++++++++++++++++++++++++++++++--
    1 file changed, 45 insertions(+), 2 deletions(-)

    Index: mmtom-2.6.27-rc5+/mm/page_cgroup.c
    ===================================================================
    --- mmtom-2.6.27-rc5+.orig/mm/page_cgroup.c
    +++ mmtom-2.6.27-rc5+/mm/page_cgroup.c
@@ -57,14 +57,26 @@ static int pcg_hashmask __read_mostly;
#define PCG_HASHMASK (pcg_hashmask)
#define PCG_HASHSIZE (1 << pcg_hashshift)

+#define PCG_CACHE_MAX_SLOT (32)
+#define PCG_CACHE_MASK (PCG_CACHE_MAX_SLOT - 1)
+struct percpu_page_cgroup_cache {
+        struct {
+                unsigned long index;
+                struct page_cgroup *base;
+        } slots[PCG_CACHE_MAX_SLOT];
+};
+DEFINE_PER_CPU(struct percpu_page_cgroup_cache, pcg_cache);
+
int pcg_hashfun(unsigned long index)
{
        return hash_long(index, pcg_hashshift);
}

-struct page_cgroup *lookup_page_cgroup(unsigned long pfn)
+static noinline struct page_cgroup *
+__lookup_page_cgroup(struct percpu_page_cgroup_cache *pcc, unsigned long pfn)
{
        unsigned long index = pfn >> ENTS_PER_CHUNK_SHIFT;
+        int s = index & PCG_CACHE_MASK;
        struct pcg_hash *ent;
        struct pcg_hash_head *head;
        struct hlist_node *node;
@@ -77,6 +89,8 @@ struct page_cgroup *lookup_page_cgroup(u
        hlist_for_each_entry(ent, node, &head->head, node) {
                if (ent->index == index) {
                        pc = ent->map + pfn;
+                        pcc->slots[s].index = ent->index;
+                        pcc->slots[s].base = ent->map;
                        break;
                }
        }
@@ -84,6 +98,22 @@ struct page_cgroup *lookup_page_cgroup(u
        return pc;
}

+struct page_cgroup *lookup_page_cgroup(unsigned long pfn)
+{
+        unsigned long index = pfn >> ENTS_PER_CHUNK_SHIFT;
+        int hnum = index & PCG_CACHE_MASK;
+        struct percpu_page_cgroup_cache *pcc;
+        struct page_cgroup *ret;
+
+        pcc = &get_cpu_var(pcg_cache);
+        if (likely(pcc->slots[hnum].index == index))
+                ret = pcc->slots[hnum].base + pfn;
+        else
+                ret = __lookup_page_cgroup(pcc, pfn);
+        put_cpu_var(pcg_cache);
+        return ret;
+}
    +
static void __meminit alloc_page_cgroup(int node, unsigned long index)
{
        struct pcg_hash *ent;
@@ -124,12 +154,23 @@ static void __meminit alloc_page_cgroup(
        return;
}

+void clear_page_cgroup_cache_pcg(int cpu)
+{
+        struct percpu_page_cgroup_cache *pcc;
+        int i;
+
+        pcc = &per_cpu(pcg_cache, cpu);
+        for (i = 0; i < PCG_CACHE_MAX_SLOT; i++) {
+                pcc->slots[i].index = -1;
+                pcc->slots[i].base = NULL;
+        }
+}

/* Called from mem_cgroup's initialization */
void __init page_cgroup_init(void)
{
        struct pcg_hash_head *head;
-        int node, i;
+        int node, cpu, i;
        unsigned long start, pfn, end, index, offset;
        long default_pcg_hash_size;

@@ -174,5 +215,7 @@ void __init page_cgroup_init(void)
                        }
                }
        }
+        for_each_possible_cpu(cpu)
+                clear_page_cgroup_cache_pcg(cpu);
        return;
}

