From: Faiyaz Mohammed <faiyazm@codeaurora.org>
Subject: [PATCH] mm: slub: Convert sys slab alloc_calls, free_calls to bin attribute
Date: 2021-01-12
Reading the sysfs slab alloc_calls and free_calls files returns the
recorded object owners, but a sysfs attribute is limited to PAGE_SIZE
of output, so the read returns only partial owner information. That is
not sufficient to debug or account slab memory, and the alloc_calls
output does not match /proc/slabinfo.

Remove the PAGE_SIZE limitation by converting the sysfs slab
alloc_calls and free_calls files to bin attributes, whose read handlers
take an offset and are called repeatedly until the whole list has been
emitted.

Signed-off-by: Faiyaz Mohammed <faiyazm@codeaurora.org>
---
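Note for reviewers: with a bin attribute, sysfs invokes the ->read()
handler repeatedly with an advancing offset until the handler returns
0, so userspace can retrieve the complete owner list with an ordinary
read() loop. A minimal illustrative sketch follows; the cache name
"kmalloc-64" is only an example, and it assumes slub_debug is enabled
so that SLAB_STORE_USER is set for the cache:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/slab/kmalloc-64/alloc_calls", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Each read() advances the file offset; sysfs passes that
         * offset down to the kernel handler, so this loop sees the
         * whole list rather than just the first PAGE_SIZE bytes. */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);

        close(fd);
        return 0;
}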
mm/slub.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 48 insertions(+), 14 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index b52384e..8744e5ec 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4710,13 +4710,14 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
}

static int list_locations(struct kmem_cache *s, char *buf,
- enum track_item alloc)
+ loff_t offset, enum track_item alloc)
{
int len = 0;
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
struct kmem_cache_node *n;
+ static unsigned int previous_read_count;
unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);

if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
@@ -4742,11 +4743,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
spin_unlock_irqrestore(&n->list_lock, flags);
}

- for (i = 0; i < t.count; i++) {
+ for (i = previous_read_count; i < t.count; i++) {
struct location *l = &t.loc[i];

- if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
- break;
len += sprintf(buf + len, "%7ld ", l->count);

if (l->addr)
@@ -4784,12 +4783,20 @@ static int list_locations(struct kmem_cache *s, char *buf,
nodemask_pr_args(&l->nodes));

len += sprintf(buf + len, "\n");
+
+ if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) {
+ previous_read_count = i + 1;
+ break;
+ }
}

+ if ((offset != 0) && ((i >= t.count) || (previous_read_count > t.count))) {
+ previous_read_count = 0;
+ len = 0;
+ } else if (!t.count)
+ len += sprintf(buf, "No data\n");
free_loc_track(&t);
bitmap_free(map);
- if (!t.count)
- len += sprintf(buf, "No data\n");
return len;
}

@@ -5180,6 +5187,7 @@ static int any_slab_objects(struct kmem_cache *s)

struct slab_attribute {
struct attribute attr;
+ struct bin_attribute bin_attr;
ssize_t (*show)(struct kmem_cache *s, char *buf);
ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};
@@ -5192,6 +5200,12 @@ struct slab_attribute {
static struct slab_attribute _name##_attr = \
__ATTR(_name, 0600, _name##_show, _name##_store)

+#define SLAB_BIN_ATTR_RO(_name) \
+ static struct slab_attribute _name##_attr = { \
+ .bin_attr = \
+ __BIN_ATTR_RO(_name, 0) \
+ }
+
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%u\n", s->size);
@@ -5535,21 +5549,33 @@ static ssize_t validate_store(struct kmem_cache *s,
}
SLAB_ATTR(validate);

-static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
+static ssize_t alloc_calls_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
{
+ struct kmem_cache *s;
+
+ s = to_slab(kobj);
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
- return list_locations(s, buf, TRACK_ALLOC);
+
+ return list_locations(s, buf, offset, TRACK_ALLOC);
}
-SLAB_ATTR_RO(alloc_calls);
+SLAB_BIN_ATTR_RO(alloc_calls);

-static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
+static ssize_t free_calls_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
{
+ struct kmem_cache *s;
+
+ s = to_slab(kobj);
if (!(s->flags & SLAB_STORE_USER))
return -ENOSYS;
- return list_locations(s, buf, TRACK_FREE);
+
+ return list_locations(s, buf, offset, TRACK_FREE);
}
-SLAB_ATTR_RO(free_calls);
+SLAB_BIN_ATTR_RO(free_calls);
#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_FAILSLAB
@@ -5694,6 +5720,15 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif /* CONFIG_SLUB_STATS */

+
+static struct bin_attribute *slab_bin_attrs[] = {
+#ifdef CONFIG_SLUB_DEBUG
+ &alloc_calls_attr.bin_attr,
+ &free_calls_attr.bin_attr,
+#endif
+ NULL, /* .bin_attrs arrays must be NULL-terminated */
+};
+
static struct attribute *slab_attrs[] = {
&slab_size_attr.attr,
&object_size_attr.attr,
@@ -5722,8 +5756,6 @@ static struct attribute *slab_attrs[] = {
&poison_attr.attr,
&store_user_attr.attr,
&validate_attr.attr,
- &alloc_calls_attr.attr,
- &free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
@@ -5769,6 +5801,7 @@ static struct attribute *slab_attrs[] = {

static const struct attribute_group slab_attr_group = {
.attrs = slab_attrs,
+ .bin_attrs = slab_bin_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
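
A note on the naming above: the handlers become alloc_calls_read and
free_calls_read because __BIN_ATTR_RO() binds a read-only bin attribute
to a function named _name##_read. Abridged here from
include/linux/sysfs.h for reference (see the header for the
authoritative definition):

/* __BIN_ATTR_RO(_name, _size) builds a read-only struct bin_attribute
 * whose ->read hook is the function named _name##_read. */
#define __BIN_ATTR(_name, _mode, _read, _write, _size) {        \
        .attr = { .name = __stringify(_name), .mode = _mode },  \
        .read  = _read,                                         \
        .write = _write,                                        \
        .size  = _size,                                         \
}

#define __BIN_ATTR_RO(_name, _size) \
        __BIN_ATTR(_name, 0444, _name##_read, NULL, _size)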
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of the Code Aurora Forum, hosted by The Linux Foundation