Date: Wed, 17 Apr 2019
From: Vitaly Wool <vitaly.vul@sony.com>
Subject: [PATCHv2 1/4] z3fold: introduce helper functions
Introduce a separate helper function for object allocation, along
with two smaller helpers: one to add a buddy to the unbuddied list
and one to get a pointer to the pool from a z3fold page header. No
functional changes.

Signed-off-by: Vitaly Wool <vitaly.vul@sony.com>
---
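Reviewer note, not part of the commit message: the three helpers
factored out below are zhdr_to_pool(), add_to_unbuddied() and
__z3fold_alloc(). As a minimal standalone sketch of the accessor
pattern (struct layout reduced to just the fields the helpers touch;
is_unbuddied() is a hypothetical name used only for this
illustration, not something the patch adds):

/*
 * Illustration only -- a reduction of the pattern this patch
 * factors out of mm/z3fold.c. Field names mirror the kernel code;
 * everything else is simplified.
 */
struct z3fold_pool;			/* opaque for this sketch */

struct z3fold_header {
	struct z3fold_pool *pool;	/* back-pointer the accessor wraps */
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
};

/* Single place that encodes how to get from a header to its pool. */
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

/* A page is "unbuddied" when at least one of its three slots is free. */
static inline int is_unbuddied(struct z3fold_header *zhdr)
{
	return zhdr->first_chunks == 0 || zhdr->middle_chunks == 0 ||
	       zhdr->last_chunks == 0;
}

The real add_to_unbuddied() in the diff additionally takes the pool
lock and files the header on the per-CPU unbuddied list indexed by
num_free_chunks(), so the full logic lives in the patch below.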
mm/z3fold.c | 184 ++++++++++++++++++++++++++++------------------------
1 file changed, 100 insertions(+), 84 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index aee9b0b8d907..7a59875d880c 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -255,10 +255,15 @@ static enum buddy handle_to_buddy(unsigned long handle)
return (handle - zhdr->first_num) & BUDDY_MASK;
}

+static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
+{
+ return zhdr->pool;
+}
+
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
struct page *page = virt_to_page(zhdr);
- struct z3fold_pool *pool = zhdr->pool;
+ struct z3fold_pool *pool = zhdr_to_pool(zhdr);

WARN_ON(!list_empty(&zhdr->buddy));
set_bit(PAGE_STALE, &page->private);
@@ -295,9 +300,10 @@ static void release_z3fold_page_locked_list(struct kref *ref)
{
struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
refcount);
- spin_lock(&zhdr->pool->lock);
+ struct z3fold_pool *pool = zhdr_to_pool(zhdr);
+ spin_lock(&pool->lock);
list_del_init(&zhdr->buddy);
- spin_unlock(&zhdr->pool->lock);
+ spin_unlock(&pool->lock);

WARN_ON(z3fold_page_trylock(zhdr));
__release_z3fold_page(zhdr, true);
@@ -349,6 +355,23 @@ static int num_free_chunks(struct z3fold_header *zhdr)
return nfree;
}

+/* Add to the appropriate unbuddied list */
+static inline void add_to_unbuddied(struct z3fold_pool *pool,
+ struct z3fold_header *zhdr)
+{
+ if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
+ zhdr->middle_chunks == 0) {
+ struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
+
+ int freechunks = num_free_chunks(zhdr);
+ spin_lock(&pool->lock);
+ list_add(&zhdr->buddy, &unbuddied[freechunks]);
+ spin_unlock(&pool->lock);
+ zhdr->cpu = smp_processor_id();
+ put_cpu_ptr(pool->unbuddied);
+ }
+}
+
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
unsigned short dst_chunk)
{
@@ -406,10 +429,8 @@ static int z3fold_compact_page(struct z3fold_header *zhdr)

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
- struct z3fold_pool *pool = zhdr->pool;
+ struct z3fold_pool *pool = zhdr_to_pool(zhdr);
struct page *page;
- struct list_head *unbuddied;
- int fchunks;

page = virt_to_page(zhdr);
if (locked)
@@ -430,18 +451,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
}

z3fold_compact_page(zhdr);
- unbuddied = get_cpu_ptr(pool->unbuddied);
- fchunks = num_free_chunks(zhdr);
- if (fchunks < NCHUNKS &&
- (!zhdr->first_chunks || !zhdr->middle_chunks ||
- !zhdr->last_chunks)) {
- /* the page's not completely free and it's unbuddied */
- spin_lock(&pool->lock);
- list_add(&zhdr->buddy, &unbuddied[fchunks]);
- spin_unlock(&pool->lock);
- zhdr->cpu = smp_processor_id();
- }
- put_cpu_ptr(pool->unbuddied);
+ add_to_unbuddied(pool, zhdr);
z3fold_page_unlock(zhdr);
}

@@ -453,6 +463,67 @@ static void compact_page_work(struct work_struct *w)
do_compact_page(zhdr, false);
}

+/* returns _locked_ z3fold page header or NULL */
+static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
+ size_t size, bool can_sleep)
+{
+ struct z3fold_header *zhdr = NULL;
+ struct page *page;
+ struct list_head *unbuddied;
+ int chunks = size_to_chunks(size), i;
+
+lookup:
+ /* First, try to find an unbuddied z3fold page. */
+ unbuddied = get_cpu_ptr(pool->unbuddied);
+ for_each_unbuddied_list(i, chunks) {
+ struct list_head *l = &unbuddied[i];
+
+ zhdr = list_first_entry_or_null(READ_ONCE(l),
+ struct z3fold_header, buddy);
+
+ if (!zhdr)
+ continue;
+
+ /* Re-check under lock. */
+ spin_lock(&pool->lock);
+ l = &unbuddied[i];
+ if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
+ struct z3fold_header, buddy)) ||
+ !z3fold_page_trylock(zhdr)) {
+ spin_unlock(&pool->lock);
+ zhdr = NULL;
+ put_cpu_ptr(pool->unbuddied);
+ if (can_sleep)
+ cond_resched();
+ goto lookup;
+ }
+ list_del_init(&zhdr->buddy);
+ zhdr->cpu = -1;
+ spin_unlock(&pool->lock);
+
+ page = virt_to_page(zhdr);
+ if (test_bit(NEEDS_COMPACTING, &page->private)) {
+ z3fold_page_unlock(zhdr);
+ zhdr = NULL;
+ put_cpu_ptr(pool->unbuddied);
+ if (can_sleep)
+ cond_resched();
+ goto lookup;
+ }
+
+ /*
+ * this page could not be removed from its unbuddied
+ * list while pool lock was held, and then we've taken
+ * page lock so kref_put could not be called before
+ * we got here, so it's safe to just call kref_get()
+ */
+ kref_get(&zhdr->refcount);
+ break;
+ }
+ put_cpu_ptr(pool->unbuddied);
+
+ return zhdr;
+}

/*
* API Functions
@@ -546,7 +617,7 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
- int chunks = 0, i, freechunks;
+ int chunks = size_to_chunks(size);
struct z3fold_header *zhdr = NULL;
struct page *page = NULL;
enum buddy bud;
@@ -561,56 +632,8 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
bud = HEADLESS;
else {
- struct list_head *unbuddied;
- chunks = size_to_chunks(size);
-
-lookup:
- /* First, try to find an unbuddied z3fold page. */
- unbuddied = get_cpu_ptr(pool->unbuddied);
- for_each_unbuddied_list(i, chunks) {
- struct list_head *l = &unbuddied[i];
-
- zhdr = list_first_entry_or_null(READ_ONCE(l),
- struct z3fold_header, buddy);
-
- if (!zhdr)
- continue;
-
- /* Re-check under lock. */
- spin_lock(&pool->lock);
- l = &unbuddied[i];
- if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
- struct z3fold_header, buddy)) ||
- !z3fold_page_trylock(zhdr)) {
- spin_unlock(&pool->lock);
- put_cpu_ptr(pool->unbuddied);
- goto lookup;
- }
- list_del_init(&zhdr->buddy);
- zhdr->cpu = -1;
- spin_unlock(&pool->lock);
-
- page = virt_to_page(zhdr);
- if (test_bit(NEEDS_COMPACTING, &page->private)) {
- z3fold_page_unlock(zhdr);
- zhdr = NULL;
- put_cpu_ptr(pool->unbuddied);
- if (can_sleep)
- cond_resched();
- goto lookup;
- }
-
- /*
- * this page could not be removed from its unbuddied
- * list while pool lock was held, and then we've taken
- * page lock so kref_put could not be called before
- * we got here, so it's safe to just call kref_get()
- */
- kref_get(&zhdr->refcount);
- break;
- }
- put_cpu_ptr(pool->unbuddied);
-
+retry:
+ zhdr = __z3fold_alloc(pool, size, can_sleep);
if (zhdr) {
if (zhdr->first_chunks == 0) {
if (zhdr->middle_chunks != 0 &&
@@ -630,8 +653,9 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
z3fold_page_unlock(zhdr);
pr_err("No free chunks in unbuddied\n");
WARN_ON(1);
- goto lookup;
+ goto retry;
}
+ page = virt_to_page(zhdr);
goto found;
}
bud = FIRST;
@@ -662,8 +686,12 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
if (!page)
return -ENOMEM;

- atomic64_inc(&pool->pages_nr);
zhdr = init_z3fold_page(page, pool);
+ if (!zhdr) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+ atomic64_inc(&pool->pages_nr);

if (bud == HEADLESS) {
set_bit(PAGE_HEADLESS, &page->private);
@@ -680,19 +708,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
zhdr->middle_chunks = chunks;
zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
}
-
- if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
- zhdr->middle_chunks == 0) {
- struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
-
- /* Add to unbuddied list */
- freechunks = num_free_chunks(zhdr);
- spin_lock(&pool->lock);
- list_add(&zhdr->buddy, &unbuddied[freechunks]);
- spin_unlock(&pool->lock);
- zhdr->cpu = smp_processor_id();
- put_cpu_ptr(pool->unbuddied);
- }
+ add_to_unbuddied(pool, zhdr);

headless:
spin_lock(&pool->lock);
--
2.17.1