Subject: Re: [PATCH v8 1/5] kasan: support backing vmalloc space with real shadow memory
Hello, Daniel.

> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index a3c70e275f4e..9fb7a16f42ae 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -690,8 +690,19 @@ merge_or_add_vmap_area(struct vmap_area *va,
> struct list_head *next;
> struct rb_node **link;
> struct rb_node *parent;
> + unsigned long orig_start, orig_end;
Shouldn't that be wrapped in #ifdef CONFIG_KASAN_VMALLOC?
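I mean something like this (an untested sketch, just to illustrate the idea):

#ifdef CONFIG_KASAN_VMALLOC
	/* Only needed for releasing the vmalloc shadow. */
	unsigned long orig_start, orig_end;
#endif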

> bool merged = false;
>
> + /*
> + * To manage KASAN vmalloc memory usage, we use this opportunity to
> + * clean up the shadow memory allocated to back this allocation.
> + * Because a vmalloc shadow page covers several pages, the start or end
> + * of an allocation might not align with a shadow page. Use the merging
> + * opportunities to try to extend the region we can release.
> + */
> + orig_start = va->va_start;
> + orig_end = va->va_end;
> +
The same.

> /*
> * Find a place in the tree where VA potentially will be
> * inserted, unless it is merged with its sibling/siblings.
> @@ -741,6 +752,10 @@ merge_or_add_vmap_area(struct vmap_area *va,
> if (sibling->va_end == va->va_start) {
> sibling->va_end = va->va_end;
>
> + kasan_release_vmalloc(orig_start, orig_end,
> + sibling->va_start,
> + sibling->va_end);
> +
The same.

> /* Check and update the tree if needed. */
> augment_tree_propagate_from(sibling);
>
> @@ -754,6 +769,8 @@ merge_or_add_vmap_area(struct vmap_area *va,
> }
>
> insert:
> + kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end);
> +
The same here, and for all further changes in this file.
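As an alternative to sprinkling #ifdef CONFIG_KASAN_VMALLOC through mm/vmalloc.c, a no-op stub for the disabled case could live in the header, e.g. in include/linux/kasan.h. A rough sketch only; the prototype below is inferred from how the function is called above:

#ifdef CONFIG_KASAN_VMALLOC
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
#else
/* Compiles away entirely when CONFIG_KASAN_VMALLOC is not set. */
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end)
{
}
#endif

With an empty static inline, the calls (and the orig_start/orig_end bookkeeping that feeds them) should be optimized out for !CONFIG_KASAN_VMALLOC builds, so the callers stay free of #ifdefs.
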
> if (!merged) {
> link_va(va, root, parent, link, head);
> augment_tree_propagate_from(va);
> @@ -2068,6 +2085,22 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
>
> setup_vmalloc_vm(area, va, flags, caller);
>
> + /*
> + * For KASAN, if we are in vmalloc space, we need to cover the shadow
> + * area with real memory. If we come here through VM_ALLOC, this is
> + * done by a higher level function that has access to the true size,
> + * which might not be a full page.
> + *
> + * We assume module space comes via VM_ALLOC path.
> + */
> + if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
> + if (kasan_populate_vmalloc(area->size, area)) {
> + unmap_vmap_area(va);
> + kfree(area);
> + return NULL;
> + }
> + }
> +
> return area;
> }
>
> @@ -2245,6 +2278,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
> debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
> debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
>
> + if (area->flags & VM_KASAN)
> + kasan_poison_vmalloc(area->addr, area->size);
> +
> vm_remove_mappings(area, deallocate_pages);
>
> if (deallocate_pages) {
> @@ -2497,6 +2533,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
> if (!addr)
> return NULL;
>
> + if (kasan_populate_vmalloc(real_size, area))
> + return NULL;
> +
> /*
> * In this function, newly allocated vm_struct has VM_UNINITIALIZED
> * flag. It means that vm_struct is not fully initialized.
> @@ -3351,10 +3390,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> spin_unlock(&vmap_area_lock);
>
> /* insert all vm's */
> - for (area = 0; area < nr_vms; area++)
> + for (area = 0; area < nr_vms; area++) {
> setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
> pcpu_get_vm_areas);
>
> + /* assume success here */
> + kasan_populate_vmalloc(sizes[area], vms[area]);
> + }
> +
> kfree(vas);
> return vms;
>


--
Vlad Rezki
