Subject: [PATCH 1/3] kvfree_rcu: Allocate a page for a single argument

For a single argument we can directly request a page from the caller's
context when a "carry page block" runs out of free spots. Instead of
hitting the slow path we can request an extra page on demand and
proceed with the fast path.
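
In outline, the new path drops the per-CPU lock around the allocation
and retakes it afterward; because the task may migrate while unlocked,
the possibly-different krcp is handed back through a double pointer.
A simplified sketch of the pattern (the authoritative version is in
the diff below):

	bnode = get_cached_bnode(*krcp);
	if (!bnode && can_alloc) {
		krc_this_cpu_unlock(*krcp, *flags);	/* allocator may sleep */
		bnode = (struct kvfree_rcu_bulk_data *)
			__get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
		*krcp = krc_this_cpu_lock(flags);	/* may be another CPU's krcp */
	}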

A single-argument kvfree_rcu() must be invoked in sleepable contexts,
and its fallback is the relatively high-latency synchronize_rcu().
Single-argument kvfree_rcu() therefore uses GFP_KERNEL|__GFP_RETRY_MAYFAIL
to allow limited sleeping within the memory allocator.
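
For reference, the two calling forms and their context requirements,
as a minimal sketch (struct foo and its fields are illustrative only;
exactly one of the two forms would be used for a given object):

	struct foo {
		struct rcu_head rh;
		int data;
	};
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	kvfree_rcu(p, rh);	/* double-argument: usable wherever call_rcu() is */
	kvfree_rcu(p);		/* single-argument: sleepable context only; may
				   allocate a page or fall back to synchronize_rcu() */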

[ paulmck: Add add_ptr_to_bulk_krc_lock header comment per Michal Hocko. ]
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
kernel/rcu/tree.c | 42 ++++++++++++++++++++++++++----------------
1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e04e336bee42..2014fb22644d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3465,37 +3465,50 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
}
}

+// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
+// state specified by flags. If can_alloc is true, the caller must
+// be schedulable and not be holding any locks or mutexes that might be
+// acquired by the memory allocator or anything that it might invoke.
+// Returns true if ptr was successfully recorded, else the caller must
+// use a fallback.
static inline bool
-kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
+add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
+ unsigned long *flags, void *ptr, bool can_alloc)
{
struct kvfree_rcu_bulk_data *bnode;
int idx;

- if (unlikely(!krcp->initialized))
+ *krcp = krc_this_cpu_lock(flags);
+ if (unlikely(!(*krcp)->initialized))
return false;

- lockdep_assert_held(&krcp->lock);
idx = !!is_vmalloc_addr(ptr);

/* Check if a new block is required. */
- if (!krcp->bkvhead[idx] ||
- krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
- bnode = get_cached_bnode(krcp);
- /* Switch to emergency path. */
+ if (!(*krcp)->bkvhead[idx] ||
+ (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
+ bnode = get_cached_bnode(*krcp);
+ if (!bnode && can_alloc) {
+ krc_this_cpu_unlock(*krcp, *flags);
+ bnode = (struct kvfree_rcu_bulk_data *)
+ __get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ *krcp = krc_this_cpu_lock(flags);
+ }
+
if (!bnode)
return false;

/* Initialize the new block. */
bnode->nr_records = 0;
- bnode->next = krcp->bkvhead[idx];
+ bnode->next = (*krcp)->bkvhead[idx];

/* Attach it to the head. */
- krcp->bkvhead[idx] = bnode;
+ (*krcp)->bkvhead[idx] = bnode;
}

/* Finally insert. */
- krcp->bkvhead[idx]->records
- [krcp->bkvhead[idx]->nr_records++] = ptr;
+ (*krcp)->bkvhead[idx]->records
+ [(*krcp)->bkvhead[idx]->nr_records++] = ptr;

return true;
}
@@ -3533,8 +3546,6 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
ptr = (unsigned long *) func;
}

- krcp = krc_this_cpu_lock(&flags);
-
// Queue the object but don't yet schedule the batch.
if (debug_rcu_head_queue(ptr)) {
// Probable double kfree_rcu(), just leak.
@@ -3542,12 +3553,11 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
__func__, head);

// Mark as success and leave.
- success = true;
- goto unlock_return;
+ return;
}

kasan_record_aux_stack(ptr);
- success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
+ success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) {
run_page_cache_worker(krcp);

--
2.20.1