Subject: [PATCH bpf-next v4 03/30] bpf: memcg-based memory accounting for bpf maps

This patch enables memcg-based memory accounting for memory allocated
by __bpf_map_area_alloc(), which is used by most map types for
large allocations.

If a map is updated from an interrupt context, and the update
results in a memory allocation, the memory cgroup can't be determined
from the context of the current process. To address this case, the
bpf map preserves a pointer to the memory cgroup of the process
that created the map. This memory cgroup is charged for allocations
made from interrupt context.

Following patches in the series will refine the accounting for
some map types.

Signed-off-by: Roman Gushchin <guro@fb.com>
---
include/linux/bpf.h | 4 ++++
kernel/bpf/helpers.c | 37 ++++++++++++++++++++++++++++++++++++-
kernel/bpf/syscall.c | 27 ++++++++++++++++++++++++++-
3 files changed, 66 insertions(+), 2 deletions(-)
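
A condensed illustration of the charging pattern introduced here (a
sketch, not part of the diff: the wrapper function name is made up for
illustration, while memalloc_use_memcg() and the map->memcg field are
taken from the hunks below). With __GFP_ACCOUNT set in
__bpf_map_area_alloc(), an allocation is charged to the memcg of the
task performing it; in interrupt context there is no meaningful current
task, so the saved map->memcg is temporarily installed as the active
memcg instead:

/* Requires <linux/bpf.h> and <linux/sched/mm.h>, as in the patch. */
static int example_update_from_irq(struct bpf_map *map, void *key,
				   void *value, u64 flags)
{
	struct mem_cgroup *old_memcg;
	int ret;

	/* Charge the map creator's cgroup, not the interrupted task's. */
	old_memcg = memalloc_use_memcg(map->memcg);
	ret = map->ops->map_update_elem(map, key, value, flags);
	/* Restore whatever memcg was active before. */
	memalloc_use_memcg(old_memcg);

	return ret;
}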

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a9b7185a6b37..b5f178afde94 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -34,6 +34,7 @@ struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
+struct mem_cgroup;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -138,6 +139,9 @@ struct bpf_map {
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
+#ifdef CONFIG_MEMCG_KMEM
+ struct mem_cgroup *memcg;
+#endif
char name[BPF_OBJ_NAME_LEN];
u32 btf_vmlinux_value_type_id;
bool bypass_spec_v1;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index be43ab3e619f..f8ce7bc7003f 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -14,6 +14,7 @@
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
+#include <linux/sched/mm.h>

#include "../../lib/kstrtox.h"

@@ -41,11 +42,45 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
.arg2_type = ARG_PTR_TO_MAP_KEY,
};

+#ifdef CONFIG_MEMCG_KMEM
+static __always_inline int __bpf_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ struct mem_cgroup *old_memcg;
+ bool in_interrupt;
+ int ret;
+
+ /*
+ * If update from an interrupt context results in a memory allocation,
+ * the memory cgroup to charge can't be determined from the context
+ * of the current task. Instead, we charge the memory cgroup of the
+ * process that created the map.
+ */
+ in_interrupt = in_interrupt();
+ if (in_interrupt)
+ old_memcg = memalloc_use_memcg(map->memcg);
+
+ ret = map->ops->map_update_elem(map, key, value, flags);
+
+ if (in_interrupt)
+ memalloc_use_memcg(old_memcg);
+
+ return ret;
+}
+#else
+static __always_inline int __bpf_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ return map->ops->map_update_elem(map, key, value, flags);
+}
+#endif
+
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
void *, value, u64, flags)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- return map->ops->map_update_elem(map, key, value, flags);
+
+ return __bpf_map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 689d736b6904..683614c17a95 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -29,6 +29,7 @@
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
+#include <linux/memcontrol.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
@@ -275,7 +276,7 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
* __GFP_RETRY_MAYFAIL to avoid such situations.
*/

- const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
+ const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
unsigned int flags = 0;
unsigned long align = 1;
void *area;
@@ -452,6 +453,27 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
__release(&map_idr_lock);
}

+#ifdef CONFIG_MEMCG_KMEM
+static void bpf_map_save_memcg(struct bpf_map *map)
+{
+ map->memcg = get_mem_cgroup_from_mm(current->mm);
+}
+
+static void bpf_map_release_memcg(struct bpf_map *map)
+{
+ mem_cgroup_put(map->memcg);
+}
+
+#else
+static void bpf_map_save_memcg(struct bpf_map *map)
+{
+}
+
+static void bpf_map_release_memcg(struct bpf_map *map)
+{
+}
+#endif
+
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
@@ -463,6 +485,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
/* implementation dependent freeing */
map->ops->map_free(map);
bpf_map_charge_finish(&mem);
+ bpf_map_release_memcg(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
@@ -869,6 +892,8 @@ static int map_create(union bpf_attr *attr)
if (err)
goto free_map_sec;

+ bpf_map_save_memcg(map);
+
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
/* failed to allocate fd.
--
2.26.2