Subject: [PATCH 5.10 081/137] wireguard: peer: allocate in kmem_cache
From: Jason A. Donenfeld <Jason@zx2c4.com>

commit a4e9f8e3287c9eb6bf70df982870980dd3341863 upstream.

With deployments having upwards of 600k peers now, this somewhat heavy
structure could benefit from more fine-grained allocations.
Specifically, instead of using a 2048-byte slab for a 1544-byte object,
we can now use 1544-byte objects directly, thus saving almost 25%
per-peer, or with 600k peers, that's a savings of 303 MiB. This also
makes wireguard's memory usage more transparent in tools like slabtop
and /proc/slabinfo.
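
For reference, the allocation pattern this patch switches to is the standard
dedicated-slab lifecycle: create a cache sized to the object with KMEM_CACHE()
at module init, allocate zeroed objects from it with kmem_cache_zalloc(),
return them with kmem_cache_free(), and tear the cache down with
kmem_cache_destroy() at module exit. A 1544-byte kzalloc() falls into the
generic 2048-byte kmalloc bucket, so a dedicated cache saves roughly 500 bytes
per peer, i.e. about 300 MB across 600k peers. Below is a minimal out-of-tree
sketch of that pattern, not WireGuard code; the struct and the example_*
identifiers are invented purely for illustration.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_peer {
	u64 id;
	u8 key[32];
	/* ... the real wg_peer carries roughly 1.5 KiB of per-peer state ... */
};

static struct kmem_cache *example_cache;

static struct example_peer *example_peer_create(void)
{
	/* Zeroed allocation from the dedicated slab, as in wg_peer_create(). */
	return kmem_cache_zalloc(example_cache, GFP_KERNEL);
}

static void example_peer_destroy(struct example_peer *peer)
{
	/* Scrub potentially sensitive state before returning the object to
	 * the cache, mirroring the memzero_explicit() + kmem_cache_free()
	 * pair the patch adds to rcu_release().
	 */
	if (!peer)
		return;
	memzero_explicit(peer, sizeof(*peer));
	kmem_cache_free(example_cache, peer);
}

static int __init example_init(void)
{
	struct example_peer *peer;

	/* KMEM_CACHE() names and sizes the cache after the struct itself. */
	example_cache = KMEM_CACHE(example_peer, 0);
	if (!example_cache)
		return -ENOMEM;

	/* Quick round trip just to exercise the helpers. */
	peer = example_peer_create();
	if (!peer) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	peer->id = 1;
	example_peer_destroy(peer);
	return 0;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("kmem_cache allocation pattern sketch");

Because the cache gets its own name from KMEM_CACHE(), these allocations also
show up as a separate line in /proc/slabinfo and slabtop (unless the allocator
merges the cache with a compatible one), which is the transparency benefit
mentioned above.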

Fixes: 8b5553ace83c ("wireguard: queueing: get rid of per-peer ring buffers")
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Cc: stable@vger.kernel.org
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
drivers/net/wireguard/main.c | 7 +++++++
drivers/net/wireguard/peer.c | 21 +++++++++++++++++----
drivers/net/wireguard/peer.h | 3 +++
3 files changed, 27 insertions(+), 4 deletions(-)

--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -28,6 +28,10 @@ static int __init mod_init(void)
 #endif
 	wg_noise_init();
 
+	ret = wg_peer_init();
+	if (ret < 0)
+		goto err_peer;
+
 	ret = wg_device_init();
 	if (ret < 0)
 		goto err_device;
@@ -44,6 +48,8 @@ static int __init mod_init(void)
 err_netlink:
 	wg_device_uninit();
 err_device:
+	wg_peer_uninit();
+err_peer:
 	return ret;
 }
 
@@ -51,6 +57,7 @@ static void __exit mod_exit(void)
 {
 	wg_genetlink_uninit();
 	wg_device_uninit();
+	wg_peer_uninit();
 }
 
 module_init(mod_init);
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 
+static struct kmem_cache *peer_cache;
 static atomic64_t peer_counter = ATOMIC64_INIT(0);
 
 struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg
 	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
 		return ERR_PTR(ret);
 
-	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+	peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
 	if (unlikely(!peer))
 		return ERR_PTR(ret);
-	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+	if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
 		goto err;
 
 	peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg
 	return peer;
 
 err:
-	kfree(peer);
+	kmem_cache_free(peer_cache, peer);
 	return ERR_PTR(ret);
 }
 
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head
 	/* The final zeroing takes care of clearing any remaining handshake key
 	 * material and other potentially sensitive information.
 	 */
-	kfree_sensitive(peer);
+	memzero_explicit(peer, sizeof(*peer));
+	kmem_cache_free(peer_cache, peer);
 }
 
 static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
 		return;
 	kref_put(&peer->refcount, kref_release);
 }
+
+int __init wg_peer_init(void)
+{
+	peer_cache = KMEM_CACHE(wg_peer, 0);
+	return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+	kmem_cache_destroy(peer_cache);
+}
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
 void wg_peer_remove(struct wg_peer *peer);
 void wg_peer_remove_all(struct wg_device *wg);
 
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
 #endif /* _WG_PEER_H */
