Subject: [PATCH v4 2/2] mm/vmalloc.c: Modify struct vmap_area to reduce its size

Objective
---------
The current layout of struct vmap_area wastes space.

After this commit, sizeof(struct vmap_area) is reduced from
11 words to 8 words.
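
As a sanity check, the 11-to-8 arithmetic can be reproduced in
userspace. Below is a minimal sketch (not part of the patch) using
stand-in definitions that only mirror the sizes of the kernel types;
it assumes an LP64 target (a word is sizeof(long) == 8 bytes) and
C11 anonymous structs/unions:

#include <stdio.h>

/* Stand-in types that only mirror the sizes of the kernel ones. */
struct rb_node { unsigned long __rb_parent_color;
		 struct rb_node *rb_right, *rb_left; };	/* 3 words */
struct list_head { struct list_head *next, *prev; };	/* 2 words */
struct llist_node { struct llist_node *next; };		/* 1 word  */
struct vm_struct;					/* opaque  */

struct vmap_area_old {				/* before: 11 words */
	unsigned long va_start, va_end;		/*  2 */
	unsigned long subtree_max_size;		/*  1 */
	unsigned long flags;			/*  1 */
	struct rb_node rb_node;			/*  3 */
	struct list_head list;			/*  2 */
	struct llist_node purge_list;		/*  1 */
	struct vm_struct *vm;			/*  1 */
};

struct vmap_area_new {				/* after: 8 words */
	unsigned long va_start, va_end;		/*  2 */
	union {
		struct {
			union {
				struct vm_struct *vm;
				unsigned long subtree_max_size;
			};			/*  1 */
			struct rb_node rb_node;	/*  3 */
			struct list_head list;	/*  2 */
		};
		struct llist_node purge_list;	/* overlays the above */
	};
};

int main(void)
{
	printf("old: %zu words\n", sizeof(struct vmap_area_old) / sizeof(long));
	printf("new: %zu words\n", sizeof(struct vmap_area_new) / sizeof(long));
	return 0;
}

On such a target it prints "old: 11 words" and "new: 8 words".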

Description
-----------
1) Pack "vm" and "subtree_max_size" into a union.
This is safe because:
A) "vm" is only used while the vmap_area is in the "busy" tree
B) "subtree_max_size" is only used while the vmap_area is in
the "free" tree

2) Pack "purge_list".
The member "purge_list" is only used while the vmap_area is on
the "lazy purge" list, so it can share storage with the members
used only in the rbtree and list sorted by address.

3) Eliminate "flags".
Since VM_VM_AREA is the only flag still in use, and the same
information can be obtained by checking whether "vm" is NULL,
"flags" can be eliminated.

Signed-off-by: Pengfei Li <lpf.vector@gmail.com>
Suggested-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 include/linux/vmalloc.h | 40 +++++++++++++++++++++++++++++++---------
 mm/vmalloc.c            | 28 +++++++++++++---------------
 2 files changed, 44 insertions(+), 24 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9b21d0047710..6fb377ca9e7a 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -51,15 +51,37 @@ struct vmap_area {
 	unsigned long va_start;
 	unsigned long va_end;
 
-	/*
-	 * Largest available free size in subtree.
-	 */
-	unsigned long subtree_max_size;
-	unsigned long flags;
-	struct rb_node rb_node;         /* address sorted rbtree */
-	struct list_head list;          /* address sorted list */
-	struct llist_node purge_list;   /* "lazy purge" list */
-	struct vm_struct *vm;
+	union {
+		/* In rbtree and list sorted by address */
+		struct {
+			union {
+				/*
+				 * In "busy" rbtree and list.
+				 * rbtree root: vmap_area_root
+				 * list head: vmap_area_list
+				 */
+				struct vm_struct *vm;
+
+				/*
+				 * In "free" rbtree and list.
+				 * rbtree root: free_vmap_area_root
+				 * list head: free_vmap_area_list
+				 */
+				unsigned long subtree_max_size;
+			};
+
+			struct rb_node rb_node;
+			struct list_head list;
+		};
+
+		/*
+		 * In "lazy purge" list.
+		 * llist head: vmap_purge_list
+		 */
+		struct {
+			struct llist_node purge_list;
+		};
+	};
 };
 
 /*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 9eb700a2087b..1245d3285a32 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -329,7 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
-#define VM_VM_AREA	0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
@@ -1115,7 +1114,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	va->va_start = addr;
 	va->va_end = addr + size;
-	va->flags = 0;
+	va->vm = NULL;
 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 
 	spin_unlock(&vmap_area_lock);
@@ -1279,7 +1278,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
-		__free_vmap_area(va);
+		merge_or_add_vmap_area(va,
+			&free_vmap_area_root, &free_vmap_area_list);
+
 		atomic_long_sub(nr, &vmap_lazy_nr);
 
 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
@@ -1919,7 +1920,6 @@ void __init vmalloc_init(void)
 		if (WARN_ON_ONCE(!va))
 			continue;
 
-		va->flags = VM_VM_AREA;
 		va->va_start = (unsigned long)tmp->addr;
 		va->va_end = va->va_start + tmp->size;
 		va->vm = tmp;
@@ -2017,7 +2017,6 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 	vm->size = va->va_end - va->va_start;
 	vm->caller = caller;
 	va->vm = vm;
-	va->flags |= VM_VM_AREA;
 	spin_unlock(&vmap_area_lock);
 }
 
@@ -2122,10 +2121,10 @@ struct vm_struct *find_vm_area(const void *addr)
 	struct vmap_area *va;
 
 	va = find_vmap_area((unsigned long)addr);
-	if (va && va->flags & VM_VM_AREA)
-		return va->vm;
+	if (!va)
+		return NULL;
 
-	return NULL;
+	return va->vm;
 }
 
 /**
@@ -2146,11 +2145,10 @@ struct vm_struct *remove_vm_area(const void *addr)
 
 	spin_lock(&vmap_area_lock);
 	va = __find_vmap_area((unsigned long)addr);
-	if (va && va->flags & VM_VM_AREA) {
+	if (va && va->vm) {
 		struct vm_struct *vm = va->vm;
 
 		va->vm = NULL;
-		va->flags &= ~VM_VM_AREA;
 		spin_unlock(&vmap_area_lock);
 
 		kasan_free_shadow(vm);
@@ -2853,7 +2851,7 @@ long vread(char *buf, char *addr, unsigned long count)
 		if (!count)
 			break;
 
-		if (!(va->flags & VM_VM_AREA))
+		if (!va->vm)
 			continue;
 
 		vm = va->vm;
@@ -2933,7 +2931,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
 		if (!count)
 			break;
 
-		if (!(va->flags & VM_VM_AREA))
+		if (!va->vm)
 			continue;
 
 		vm = va->vm;
@@ -3463,10 +3461,10 @@ static int s_show(struct seq_file *m, void *p)
 	va = list_entry(p, struct vmap_area, list);
 
 	/*
-	 * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
-	 * behalf of vmap area is being tear down or vm_map_ram allocation.
+	 * If !va->vm then this vmap_area object is allocated
+	 * by vm_map_ram.
 	 */
-	if (!(va->flags & VM_VM_AREA)) {
+	if (!va->vm) {
 		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
 			(void *)va->va_start, (void *)va->va_end,
 			va->va_end - va->va_start);
--
2.21.0