    From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
    Subject: [PATCH 05/18] mm, hugetlb: protect region tracking via newly introduced resv_map lock
    Date: 2013-07-29
    There is a race condition if we map the same file in different processes.
    Region tracking is protected by mmap_sem and hugetlb_instantiation_mutex.
    When we do mmap(), we take mmap_sem but do not take
    hugetlb_instantiation_mutex. Since mmap_sem is per-process, this does not
    prevent another process from modifying the region structure, so it can be
    modified by two processes concurrently.

    To solve this, introduce a lock in resv_map and make the region
    manipulation functions take it before they do the actual work. This makes
    region tracking safe.
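
    A minimal userspace sketch of the scenario described above (not part of
    this patch; the hugetlbfs path /dev/hugepages and the 2MB mapping size
    below are assumptions for illustration): two processes only need to
    mmap() the same hugetlbfs file, each under its own mmap_sem, to reach
    the region tracking code concurrently.

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #define MAP_LEN (2UL * 1024 * 1024)    /* one 2MB huge page (assumed size) */

    int main(void)
    {
        /* Assumed hugetlbfs mount point and file name, for illustration. */
        int fd = open("/dev/hugepages/resv-race", O_CREAT | O_RDWR, 0600);
        pid_t pid;

        if (fd < 0) {
            perror("open");
            return EXIT_FAILURE;
        }

        /*
         * Parent and child both mmap() the same file. Each holds only its
         * own mm->mmap_sem, so nothing serializes their concurrent access
         * to the shared reservation region list; the per-resv_map lock
         * introduced by this patch closes that window.
         */
        pid = fork();
        if (pid == 0) {
            mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            _exit(0);
        }
        mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        waitpid(pid, NULL, 0);
        close(fd);
        return 0;
    }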

    Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

    diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
    index 2677c07..e29e28f 100644
    --- a/include/linux/hugetlb.h
    +++ b/include/linux/hugetlb.h
    @@ -26,6 +26,7 @@ struct hugepage_subpool {

    struct resv_map {
    struct kref refs;
    + spinlock_t lock;
    struct list_head regions;
    };
    extern struct resv_map *resv_map_alloc(void);
    diff --git a/mm/hugetlb.c b/mm/hugetlb.c
    index 24c0111..bf2ee11 100644
    --- a/mm/hugetlb.c
    +++ b/mm/hugetlb.c
    @@ -134,15 +134,8 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
    * Region tracking -- allows tracking of reservations and instantiated pages
    * across the pages in a mapping.
    *
    - * The region data structures are protected by a combination of the mmap_sem
    - * and the hugetlb_instantiation_mutex. To access or modify a region the caller
    - * must either hold the mmap_sem for write, or the mmap_sem for read and
    - * the hugetlb_instantiation_mutex:
    - *
    - * down_write(&mm->mmap_sem);
    - * or
    - * down_read(&mm->mmap_sem);
    - * mutex_lock(&hugetlb_instantiation_mutex);
    + * The region data structures are embedded into a resv_map and
    + * protected by a resv_map's lock
    */
    struct file_region {
    struct list_head link;
    @@ -155,6 +148,7 @@ static long region_add(struct resv_map *resv, long f, long t)
    struct list_head *head = &resv->regions;
    struct file_region *rg, *nrg, *trg;

    + spin_lock(&resv->lock);
    /* Locate the region we are either in or before. */
    list_for_each_entry(rg, head, link)
    if (f <= rg->to)
    @@ -184,6 +178,7 @@ static long region_add(struct resv_map *resv, long f, long t)
    }
    nrg->from = f;
    nrg->to = t;
    + spin_unlock(&resv->lock);
    return 0;
    }

    @@ -193,6 +188,7 @@ static long region_chg(struct resv_map *resv, long f, long t)
    struct file_region *rg, *nrg;
    long chg = 0;

    + spin_lock(&resv->lock);
    /* Locate the region we are before or in. */
    list_for_each_entry(rg, head, link)
    if (f <= rg->to)
    @@ -203,14 +199,18 @@ static long region_chg(struct resv_map *resv, long f, long t)
    * size such that we can guarantee to record the reservation. */
    if (&rg->link == head || t < rg->from) {
    nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
    - if (!nrg)
    - return -ENOMEM;
    + if (!nrg) {
    + chg = -ENOMEM;
    + goto out;
    + }
    +
    nrg->from = f;
    nrg->to = f;
    INIT_LIST_HEAD(&nrg->link);
    list_add(&nrg->link, rg->link.prev);

    - return t - f;
    + chg = t - f;
    + goto out;
    }

    /* Round our left edge to the current segment if it encloses us. */
    @@ -223,7 +223,7 @@ static long region_chg(struct resv_map *resv, long f, long t)
    if (&rg->link == head)
    break;
    if (rg->from > t)
    - return chg;
    + goto out;

    /* We overlap with this area, if it extends further than
    * us then we must extend ourselves. Account for its
    @@ -234,6 +234,9 @@ static long region_chg(struct resv_map *resv, long f, long t)
    }
    chg -= rg->to - rg->from;
    }
    +
    +out:
    + spin_unlock(&resv->lock);
    return chg;
    }

    @@ -243,12 +246,13 @@ static long region_truncate(struct resv_map *resv, long end)
    struct file_region *rg, *trg;
    long chg = 0;

    + spin_lock(&resv->lock);
    /* Locate the region we are either in or before. */
    list_for_each_entry(rg, head, link)
    if (end <= rg->to)
    break;
    if (&rg->link == head)
    - return 0;
    + goto out;

    /* If we are in the middle of a region then adjust it. */
    if (end > rg->from) {
    @@ -265,6 +269,9 @@ static long region_truncate(struct resv_map *resv, long end)
    list_del(&rg->link);
    kfree(rg);
    }
    +
    +out:
    + spin_unlock(&resv->lock);
    return chg;
    }

    @@ -274,6 +281,7 @@ static long region_count(struct resv_map *resv, long f, long t)
    struct file_region *rg;
    long chg = 0;

    + spin_lock(&resv->lock);
    /* Locate each segment we overlap with, and count that overlap. */
    list_for_each_entry(rg, head, link) {
    long seg_from;
    @@ -289,6 +297,7 @@ static long region_count(struct resv_map *resv, long f, long t)

    chg += seg_to - seg_from;
    }
    + spin_unlock(&resv->lock);

    return chg;
    }
    @@ -386,6 +395,7 @@ struct resv_map *resv_map_alloc(void)
    return NULL;

    kref_init(&resv_map->refs);
    + spin_lock_init(&resv_map->lock);
    INIT_LIST_HEAD(&resv_map->regions);

    return resv_map;
    --
    1.7.9.5

