    Subject: Re: [PATCH] mm: Optimize put_mems_allowed() usage
    Date: 2012-05-17
    Anybody care to pick this one up? Andrew?

    On Tue, 2012-03-27 at 15:14 +0200, Peter Zijlstra wrote:
    > Subject: mm: Optimize put_mems_allowed() usage
    > From: Peter Zijlstra <a.p.zijlstra@chello.nl>
    > Date: Mon Mar 26 14:13:05 CEST 2012
    >
    > Since put_mems_allowed() is strictly optional (it is a seqcount retry),
    > we don't need to evaluate it if the allocation was in fact successful,
    > saving an smp_rmb(), some loads, and some comparisons on relatively
    > fast paths.
    >
    > Since the naming get/put_mems_allowed() suggests a mandatory pairing,
    > rename the interface, as suggested by Mel, to resemble the seqcount
    > interface.
    >
    > This gives us read_mems_allowed_begin() and read_mems_allowed_retry();
    > note that the return value of the latter is inverted relative to
    > put_mems_allowed().
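    In caller terms the conversion looks like this (a minimal sketch of the
    retry pattern; alloc_something() is a hypothetical stand-in for whatever
    the real call sites allocate):

        unsigned int cookie;
        struct page *page;

        do {
                cookie = read_mems_allowed_begin();
                page = alloc_something();  /* hypothetical allocation step */
        } while (!page && read_mems_allowed_retry(cookie));

    Because && short-circuits, a successful allocation never reaches
    read_mems_allowed_retry(), so the smp_rmb() and the sequence comparison
    it implies are skipped on the common path; the old form,
    !put_mems_allowed(cookie) && !page, paid for them on every pass.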
    >
    > Acked-by: Mel Gorman <mgorman@suse.de>
    > Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    > ---
    > include/linux/cpuset.h |   27 ++++++++++++++-------------
    > kernel/cpuset.c        |    2 +-
    > mm/filemap.c           |    4 ++--
    > mm/hugetlb.c           |    4 ++--
    > mm/mempolicy.c         |   14 +++++++-------
    > mm/page_alloc.c        |    8 ++++----
    > mm/slab.c              |    4 ++--
    > mm/slub.c              |   16 +++-------------
    > 8 files changed, 35 insertions(+), 44 deletions(-)
    >
    > --- a/include/linux/cpuset.h
    > +++ b/include/linux/cpuset.h
    > @@ -89,25 +89,26 @@ extern void rebuild_sched_domains(void);
    >  extern void cpuset_print_task_mems_allowed(struct task_struct *p);
    >
    >  /*
    > - * get_mems_allowed is required when making decisions involving mems_allowed
    > - * such as during page allocation. mems_allowed can be updated in parallel
    > - * and depending on the new value an operation can fail potentially causing
    > - * process failure. A retry loop with get_mems_allowed and put_mems_allowed
    > - * prevents these artificial failures.
    > + * read_mems_allowed_begin is required when making decisions involving
    > + * mems_allowed such as during page allocation. mems_allowed can be updated in
    > + * parallel and depending on the new value an operation can fail potentially
    > + * causing process failure. A retry loop with read_mems_allowed_begin and
    > + * read_mems_allowed_retry prevents these artificial failures.
    >   */
    > -static inline unsigned int get_mems_allowed(void)
    > +static inline unsigned int read_mems_allowed_begin(void)
    >  {
    >          return read_seqcount_begin(&current->mems_allowed_seq);
    >  }
    >
    >  /*
    > - * If this returns false, the operation that took place after get_mems_allowed
    > - * may have failed. It is up to the caller to retry the operation if
    > + * If this returns true, the operation that took place after
    > + * read_mems_allowed_begin may have failed artificially due to a concurrent
    > + * update of mems_allowed. It is up to the caller to retry the operation if
    >   * appropriate.
    >   */
    > -static inline bool put_mems_allowed(unsigned int seq)
    > +static inline bool read_mems_allowed_retry(unsigned int seq)
    >  {
    > -        return !read_seqcount_retry(&current->mems_allowed_seq, seq);
    > +        return read_seqcount_retry(&current->mems_allowed_seq, seq);
    >  }
    >
    >  static inline void set_mems_allowed(nodemask_t nodemask)
    > @@ -225,14 +226,14 @@ static inline void set_mems_allowed(node
    >  {
    >  }
    >
    > -static inline unsigned int get_mems_allowed(void)
    > +static inline unsigned int read_mems_allowed_begin(void)
    >  {
    >          return 0;
    >  }
    >
    > -static inline bool put_mems_allowed(unsigned int seq)
    > +static inline bool read_mems_allowed_retry(unsigned int seq)
    >  {
    > -        return true;
    > +        return false;
    >  }
    >
    >  #endif /* !CONFIG_CPUSETS */
    > --- a/kernel/cpuset.c
    > +++ b/kernel/cpuset.c
    > @@ -976,7 +976,7 @@ static void cpuset_change_task_nodemask(
    >          task_lock(tsk);
    >          /*
    >           * Determine if a loop is necessary if another thread is doing
    > -         * get_mems_allowed(). If at least one node remains unchanged and
    > +         * read_mems_allowed_begin(). If at least one node remains unchanged and
    >           * tsk does not have a mempolicy, then an empty nodemask will not be
    >           * possible when mems_allowed is larger than a word.
    >           */
    > --- a/mm/filemap.c
    > +++ b/mm/filemap.c
    > @@ -501,10 +501,10 @@ struct page *__page_cache_alloc(gfp_t gf
    >          if (cpuset_do_page_mem_spread()) {
    >                  unsigned int cpuset_mems_cookie;
    >                  do {
    > -                        cpuset_mems_cookie = get_mems_allowed();
    > +                        cpuset_mems_cookie = read_mems_allowed_begin();
    >                          n = cpuset_mem_spread_node();
    >                          page = alloc_pages_exact_node(n, gfp, 0);
    > -                } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
    > +                } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
    >
    >                  return page;
    >          }
    > --- a/mm/hugetlb.c
    > +++ b/mm/hugetlb.c
    > @@ -541,7 +541,7 @@ static struct page *dequeue_huge_page_vm
    >          unsigned int cpuset_mems_cookie;
    >
    > retry_cpuset:
    > -        cpuset_mems_cookie = get_mems_allowed();
    > +        cpuset_mems_cookie = read_mems_allowed_begin();
    >          zonelist = huge_zonelist(vma, address,
    >                                          htlb_alloc_mask, &mpol, &nodemask);
    >          /*
    > @@ -570,7 +570,7 @@ static struct page *dequeue_huge_page_vm
    >          }
    >
    >          mpol_cond_put(mpol);
    > -        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
    > +        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
    >                  goto retry_cpuset;
    >          return page;
    >
    > --- a/mm/mempolicy.c
    > +++ b/mm/mempolicy.c
    > @@ -1692,7 +1692,7 @@ int node_random(const nodemask_t *maskp)
    >   * If the effective policy is 'BIND, returns a pointer to the mempolicy's
    >   * @nodemask for filtering the zonelist.
    >   *
    > - * Must be protected by get_mems_allowed()
    > + * Must be protected by read_mems_allowed_begin()
    >   */
    >  struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
    >                  gfp_t gfp_flags, struct mempolicy **mpol,
    > @@ -1857,7 +1857,7 @@ alloc_pages_vma(gfp_t gfp, int order, st
    >
    > retry_cpuset:
    >          pol = get_vma_policy(current, vma, addr);
    > -        cpuset_mems_cookie = get_mems_allowed();
    > +        cpuset_mems_cookie = read_mems_allowed_begin();
    >
    >          if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
    >                  unsigned nid;
    > @@ -1865,7 +1865,7 @@ alloc_pages_vma(gfp_t gfp, int order, st
    >                  nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
    >                  mpol_cond_put(pol);
    >                  page = alloc_page_interleave(gfp, order, nid);
    > -                if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
    > +                if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
    >                          goto retry_cpuset;
    >
    >                  return page;
    > @@ -1878,7 +1878,7 @@ alloc_pages_vma(gfp_t gfp, int order, st
    >                  struct page *page = __alloc_pages_nodemask(gfp, order,
    >                                  zl, policy_nodemask(gfp, pol));
    >                  __mpol_put(pol);
    > -                if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
    > +                if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
    >                          goto retry_cpuset;
    >                  return page;
    >          }
    > @@ -1887,7 +1887,7 @@ alloc_pages_vma(gfp_t gfp, int order, st
    >           */
    >          page = __alloc_pages_nodemask(gfp, order, zl,
    >                          policy_nodemask(gfp, pol));
    > -        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
    > +        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
    >                  goto retry_cpuset;
    >          return page;
    >  }
    > @@ -1921,7 +1921,7 @@ struct page *alloc_pages_current(gfp_t g
    >                  pol = &default_policy;
    >
    > retry_cpuset:
    > -        cpuset_mems_cookie = get_mems_allowed();
    > +        cpuset_mems_cookie = read_mems_allowed_begin();
    >
    >          /*
    >           * No reference counting needed for current->mempolicy
    > @@ -1934,7 +1934,7 @@ struct page *alloc_pages_current(gfp_t g
    >                          policy_zonelist(gfp, pol, numa_node_id()),
    >                          policy_nodemask(gfp, pol));
    >
    > -        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
    > +        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
    >                  goto retry_cpuset;
    >
    >          return page;
    > --- a/mm/page_alloc.c
    > +++ b/mm/page_alloc.c
    > @@ -2402,7 +2402,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, u
    >                  return NULL;
    >
    > retry_cpuset:
    > -        cpuset_mems_cookie = get_mems_allowed();
    > +        cpuset_mems_cookie = read_mems_allowed_begin();
    >
    >          /* The preferred zone is used for statistics later */
    >          first_zones_zonelist(zonelist, high_zoneidx,
    > @@ -2429,7 +2429,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, u
    >           * the mask is being updated. If a page allocation is about to fail,
    >           * check if the cpuset changed during allocation and if so, retry.
    >           */
    > -        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
    > +        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
    >                  goto retry_cpuset;
    >
    >          return page;
    > @@ -2651,9 +2651,9 @@ bool skip_free_areas_node(unsigned int f
    >                  goto out;
    >
    >          do {
    > -                cpuset_mems_cookie = get_mems_allowed();
    > +                cpuset_mems_cookie = read_mems_allowed_begin();
    >                  ret = !node_isset(nid, cpuset_current_mems_allowed);
    > -        } while (!put_mems_allowed(cpuset_mems_cookie));
    > +        } while (read_mems_allowed_retry(cpuset_mems_cookie));
    > out:
    >          return ret;
    >  }
    > --- a/mm/slab.c
    > +++ b/mm/slab.c
    > @@ -3318,7 +3318,7 @@ static void *fallback_alloc(struct kmem_
    >          local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
    >
    > retry_cpuset:
    > -        cpuset_mems_cookie = get_mems_allowed();
    > +        cpuset_mems_cookie = read_mems_allowed_begin();
    >          zonelist = node_zonelist(slab_node(current->mempolicy), flags);
    >
    > retry:
    > @@ -3374,7 +3374,7 @@ static void *fallback_alloc(struct kmem_
    >                  }
    >          }
    >
    > -        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
    > +        if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
    >                  goto retry_cpuset;
    >          return obj;
    >  }
    > --- a/mm/slub.c
    > +++ b/mm/slub.c
    > @@ -1606,7 +1606,7 @@ static struct page *get_any_partial(stru
    >                  return NULL;
    >
    >          do {
    > -                cpuset_mems_cookie = get_mems_allowed();
    > +                cpuset_mems_cookie = read_mems_allowed_begin();
    >                  zonelist = node_zonelist(slab_node(current->mempolicy), flags);
    >                  for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
    >                          struct kmem_cache_node *n;
    > @@ -1616,21 +1616,11 @@ static struct page *get_any_partial(stru
    >                          if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
    >                                          n->nr_partial > s->min_partial) {
    >                                  object = get_partial_node(s, n, c);
    > -                                if (object) {
    > -                                        /*
    > -                                         * Return the object even if
    > -                                         * put_mems_allowed indicated that
    > -                                         * the cpuset mems_allowed was
    > -                                         * updated in parallel. It's a
    > -                                         * harmless race between the alloc
    > -                                         * and the cpuset update.
    > -                                         */
    > -                                        put_mems_allowed(cpuset_mems_cookie);
    > +                                if (object)
    >                                          return object;
    > -                                }
    >                          }
    >                  }
    > -        } while (!put_mems_allowed(cpuset_mems_cookie));
    > +        } while (read_mems_allowed_retry(cpuset_mems_cookie));
    >  #endif
    >          return NULL;
    >  }
    >
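    For reference, the cookie these helpers hand around is the sequence
    count of current->mems_allowed_seq. The writer side already lives in
    kernel/cpuset.c and is untouched by this patch; schematically (a
    simplified sketch, not verbatim kernel code) it bumps that seqcount
    around every nodemask update:

        /* cpuset_change_task_nodemask(), schematic writer side */
        task_lock(tsk);
        write_seqcount_begin(&tsk->mems_allowed_seq);
        tsk->mems_allowed = *newmems;        /* publish the new nodemask */
        write_seqcount_end(&tsk->mems_allowed_seq);
        task_unlock(tsk);

    read_seqcount_retry() returns true exactly when such a writer ran in
    between, which is why read_mems_allowed_retry() can forward its result
    unchanged where put_mems_allowed() had to negate it.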


