 
From: Maor Gottlieb <maorg@nvidia.com>
Subject: [PATCH rdma-next 4/4] RDMA/umem: Move to allocate SG table from pages
Date: 2020-09-03

Remove the implementation of ib_umem_add_sg_table and instead call
sg_alloc_table_append, which already has the logic to merge contiguous
pages.
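
To illustrate the merging that matters here, the following minimal
userspace sketch (not kernel code; the page-shift and maximum-segment-size
values are assumptions) shows how a run of contiguous pfns collapses into
a single segment, which is the logic the removed helper open-coded and
sg_alloc_table_append now provides:

/*
 * Illustrative userspace model (not kernel code) of the coalescing
 * behaviour: runs of contiguous pfns, capped at the device's maximum
 * segment size, collapse into a single scatterlist segment.
 */
#include <stdio.h>

#define MODEL_PAGE_SHIFT	12		/* assumed 4KB base pages */
#define MODEL_MAX_SEG_SZ	(1UL << 31)	/* stand-in for dma_get_max_seg_size() */

/* Count the segments a pfn array needs once contiguous runs are merged. */
static unsigned long count_segments(const unsigned long *pfns, unsigned long npages)
{
	unsigned long segs = 0, seg_len = 0;

	for (unsigned long i = 0; i < npages; i++) {
		int contiguous = i && pfns[i] == pfns[i - 1] + 1;

		if (contiguous && seg_len + (1UL << MODEL_PAGE_SHIFT) <= MODEL_MAX_SEG_SZ) {
			seg_len += 1UL << MODEL_PAGE_SHIFT;	/* extend current segment */
		} else {
			segs++;					/* start a new segment */
			seg_len = 1UL << MODEL_PAGE_SHIFT;
		}
	}
	return segs;
}

int main(void)
{
	/* A 2MB huge page pinned as 512 contiguous 4KB pages, plus one stray page. */
	unsigned long pfns[513];

	for (unsigned long i = 0; i < 512; i++)
		pfns[i] = 0x1000 + i;
	pfns[512] = 0x9000;

	printf("%lu segments\n", count_segments(pfns, 513));	/* prints: 2 segments */
	return 0;
}

With the old code the table for such a buffer would still have been sized
for 513 entries up front; with the append helper only the merged entries
are allocated.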

Besides removing duplicated functionality, this change significantly
reduces the memory consumption of the SG table. Prior to this patch,
the SG table was allocated in advance for the full page count, without
taking contiguous pages into account.

On a system using 2MB huge pages, without this change the SG table
contains 512x more SG entries than necessary. E.g. for a 100GB memory
registration:

           Number of entries    Size
Before     26214400             600.0MB
After      51200                1.2MB
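
For reference, a back-of-the-envelope check of the table above, assuming
4KB base pages and a 24-byte scatterlist entry (64-bit, no SG debugging);
both assumptions are mine, not stated in the patch:

/* Rough arithmetic behind the 100GB example above (assumptions noted). */
#include <stdio.h>

int main(void)
{
	const unsigned long long region = 100ULL << 30;	/* 100GB registration */
	const unsigned long long entry_sz = 24;		/* assumed sizeof(struct scatterlist) */

	/* Before: one entry per 4KB page, allocated up front. */
	unsigned long long before = region >> 12;	/* 26214400 entries */
	/* After: each contiguous 2MB huge page merges into one entry. */
	unsigned long long after = region >> 21;	/* 51200 entries */

	printf("before: %llu entries, %.1f MB\n", before, before * entry_sz / 1048576.0);
	printf("after:  %llu entries, %.1f MB\n", after, after * entry_sz / 1048576.0);
	return 0;
}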

    Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
    Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
    ---
    drivers/infiniband/core/umem.c | 93 +++++-----------------------------
    1 file changed, 14 insertions(+), 79 deletions(-)

    diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
    index be889e99cfac..9eb946f665ec 100644
    --- a/drivers/infiniband/core/umem.c
    +++ b/drivers/infiniband/core/umem.c
    @@ -62,73 +62,6 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
    sg_free_table(&umem->sg_head);
    }

    -/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
    - *
    - * sg: current scatterlist entry
    - * page_list: array of npage struct page pointers
    - * npages: number of pages in page_list
    - * max_seg_sz: maximum segment size in bytes
    - * nents: [out] number of entries in the scatterlist
    - *
    - * Return new end of scatterlist
    - */
    -static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
    - struct page **page_list,
    - unsigned long npages,
    - unsigned int max_seg_sz,
    - int *nents)
    -{
    - unsigned long first_pfn;
    - unsigned long i = 0;
    - bool update_cur_sg = false;
    - bool first = !sg_page(sg);
    -
    - /* Check if new page_list is contiguous with end of previous page_list.
    - * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
    - */
    - if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
    - page_to_pfn(page_list[0])))
    - update_cur_sg = true;
    -
    - while (i != npages) {
    - unsigned long len;
    - struct page *first_page = page_list[i];
    -
    - first_pfn = page_to_pfn(first_page);
    -
    - /* Compute the number of contiguous pages we have starting
    - * at i
    - */
    - for (len = 0; i != npages &&
    - first_pfn + len == page_to_pfn(page_list[i]) &&
    - len < (max_seg_sz >> PAGE_SHIFT);
    - len++)
    - i++;
    -
    - /* Squash N contiguous pages from page_list into current sge */
    - if (update_cur_sg) {
    - if ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT)) {
    - sg_set_page(sg, sg_page(sg),
    - sg->length + (len << PAGE_SHIFT),
    - 0);
    - update_cur_sg = false;
    - continue;
    - }
    - update_cur_sg = false;
    - }
    -
    - /* Squash N contiguous pages into next sge or first sge */
    - if (!first)
    - sg = sg_next(sg);
    -
    - (*nents)++;
    - sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
    - first = false;
    - }
    -
    - return sg;
    -}
    -
    /**
    * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
    *
    @@ -205,7 +138,8 @@ static struct ib_umem *__ib_umem_get(struct ib_device *device,
    struct mm_struct *mm;
    unsigned long npages;
    int ret;
    - struct scatterlist *sg;
    + struct scatterlist *sg = NULL;
    + struct sg_append append = {};
    unsigned int gup_flags = FOLL_WRITE;

    /*
    @@ -255,15 +189,9 @@ static struct ib_umem *__ib_umem_get(struct ib_device *device,

    cur_base = addr & PAGE_MASK;

    - ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
    - if (ret)
    - goto vma;
    -
    if (!umem->writable)
    gup_flags |= FOLL_FORCE;

    - sg = umem->sg_head.sgl;
    -
    while (npages) {
    cond_resched();
    ret = pin_user_pages_fast(cur_base,
    @@ -276,10 +204,18 @@ static struct ib_umem *__ib_umem_get(struct ib_device *device,

    cur_base += ret * PAGE_SIZE;
    npages -= ret;
    -
    - sg = ib_umem_add_sg_table(sg, page_list, ret,
    - dma_get_max_seg_size(device->dma_device),
    - &umem->sg_nents);
    + append.left_pages = npages;
    + append.prv = sg;
    + sg = sg_alloc_table_append(&umem->sg_head, page_list, ret, 0,
    + ret << PAGE_SHIFT,
    + dma_get_max_seg_size(device->dma_device),
    + GFP_KERNEL, &append);
    + umem->sg_nents = umem->sg_head.nents;
    + if (IS_ERR(sg)) {
    + unpin_user_pages_dirty_lock(page_list, ret, 0);
    + ret = PTR_ERR(sg);
    + goto umem_release;
    + }
    }

    sg_mark_end(sg);
    @@ -301,7 +237,6 @@ static struct ib_umem *__ib_umem_get(struct ib_device *device,

    umem_release:
    __ib_umem_release(device, umem, 0);
    -vma:
    atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
    out:
    free_page((unsigned long) page_list);
    --
    2.26.2