    Subject: [PATCH 057/115] gpu: ion: Modify zeroing code so it only allocates address space once
    Date: 13 Dec 2013
    From: Rebecca Schultz Zavin <rschultz@google.com>

    vmap/vunmap spend a significant amount of time allocating the
    address space to map into. Rather than allocating address space
    for each page, instead allocate once for the entire allocation
    and then just map and unmap each page into that address space.
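
    The gain comes from paying the vmalloc address-space reservation once
    per buffer rather than once per page. For readers less familiar with
    the vmalloc helpers involved, the before/after pattern looks roughly
    like the sketch below. This is an illustration only, not part of the
    patch: zero_pages_wc_old()/zero_pages_wc_new() are hypothetical helper
    names, and it assumes the documented get_vm_area(size, flags),
    map_vm_area() and unmap_kernel_range() signatures of kernels from this
    era.

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    /* Old pattern: every page pays for a fresh address-space allocation
     * inside vmap() and a teardown in vunmap(). */
    static void zero_pages_wc_old(struct page **pages, int npages)
    {
    	int i;

    	for (i = 0; i < npages; i++) {
    		void *addr = vmap(&pages[i], 1, VM_MAP,
    				  pgprot_writecombine(PAGE_KERNEL));

    		if (!addr)
    			continue;
    		memset(addr, 0, PAGE_SIZE);
    		vunmap(addr);
    	}
    }

    /* New pattern: reserve one page worth of kernel address space up
     * front, then only rewrite the page-table entries for each page. */
    static void zero_pages_wc_new(struct page **pages, int npages)
    {
    	struct vm_struct *vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
    	int i;

    	if (!vm_struct)
    		return;

    	for (i = 0; i < npages; i++) {
    		struct page **page = &pages[i];

    		if (map_vm_area(vm_struct,
    				pgprot_writecombine(PAGE_KERNEL), &page))
    			continue;
    		memset(vm_struct->addr, 0, PAGE_SIZE);
    		unmap_kernel_range((unsigned long)vm_struct->addr,
    				   PAGE_SIZE);
    	}
    	free_vm_area(vm_struct);
    }

    In the patch itself the reserved area is created by the callers
    (ion_system_heap_free() and the allocate error path) and passed down
    to free_buffer_page(), so all pages of a buffer share one
    reservation.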

    Signed-off-by: Rebecca Schultz Zavin <rschultz@google.com>
    [jstultz: modified patch to apply to staging directory]
    Signed-off-by: John Stultz <john.stultz@linaro.org>
    ---
    drivers/staging/android/ion/ion_system_heap.c | 28 ++++++++++++++++++++-------
    1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 89247cf..e54307f 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -91,7 +91,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 
 static void free_buffer_page(struct ion_system_heap *heap,
 			     struct ion_buffer *buffer, struct page *page,
-			     unsigned int order)
+			     unsigned int order, struct vm_struct *vm_struct)
 {
 	bool cached = ion_buffer_cached(buffer);
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
@@ -105,10 +105,13 @@ static void free_buffer_page(struct ion_system_heap *heap,
 		   purpose is to keep the pages out of the cache */
 		for (i = 0; i < (1 << order); i++) {
 			struct page *sub_page = page + i;
-			void *addr = vmap(&sub_page, 1, VM_MAP,
-					  pgprot_writecombine(PAGE_KERNEL));
-			memset(addr, 0, PAGE_SIZE);
-			vunmap(addr);
+			struct page **pages = &sub_page;
+			map_vm_area(vm_struct,
+				    pgprot_writecombine(PAGE_KERNEL),
+				    &pages);
+			memset(vm_struct->addr, 0, PAGE_SIZE);
+			unmap_kernel_range((unsigned long)vm_struct->addr,
+					   PAGE_SIZE);
 		}
 		ion_page_pool_free(pool, page);
 	} else if (split_pages) {
@@ -164,6 +167,8 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	long size_remaining = PAGE_ALIGN(size);
 	unsigned int max_order = orders[0];
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
+	struct vm_struct *vm_struct;
+	pte_t *ptes;
 
 	INIT_LIST_HEAD(&pages);
 	while (size_remaining > 0) {
@@ -211,10 +216,13 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 err1:
 	kfree(table);
 err:
+	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
 	list_for_each_entry(info, &pages, list) {
-		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		free_buffer_page(sys_heap, buffer, info->page, info->order,
+				 vm_struct);
 		kfree(info);
 	}
+	free_vm_area(vm_struct);
 	return -ENOMEM;
 }
 
@@ -227,10 +235,16 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 	struct sg_table *table = buffer->sg_table;
 	struct scatterlist *sg;
 	LIST_HEAD(pages);
+	struct vm_struct *vm_struct;
+	pte_t *ptes;
 	int i;
 
+	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
+
 	for_each_sg(table->sgl, sg, table->nents, i)
-		free_buffer_page(sys_heap, buffer, sg_page(sg), get_order(sg_dma_len(sg)));
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				 get_order(sg_dma_len(sg)), vm_struct);
+	free_vm_area(vm_struct);
 	sg_free_table(table);
 	kfree(table);
 }
    --
    1.8.3.2

