Date:    Fri, 4 Oct 2019
From:    Steven Rostedt <rostedt@goodmis.org>
Subject: Re: [PATCH 3/3] x86/ftrace: Use text_poke()
On Fri, 4 Oct 2019 16:44:35 +0200
Daniel Bristot de Oliveira <bristot@redhat.com> wrote:

> On 04/10/2019 15:40, Steven Rostedt wrote:
> > On Fri, 4 Oct 2019 10:10:47 +0200
> > Daniel Bristot de Oliveira <bristot@redhat.com> wrote:
> >
> >> [ In addition ]
> >>
> >> Currently, ftrace_rec entries are ordered inside each group of functions, but
> >> the "groups of functions" themselves are not ordered. So, the current int3
> >> handler does a (*):
> >>
> >> for_each_group_of_functions:
> >>         check if the ip is in the range  ----> linear in the number of groups
> >>         do a bsearch                     ----> log(n) in the number of entries
> >>                                                in the group
> >>
> >> If, instead, it used a single ordered vector, the complexity would be log(n)
> >> in the total number of entries, which is better. So, how bad is the idea of:
> > BTW, I'm currently rewriting the grouping of the vectors, in order to
> > shrink the size of each dyn_ftrace_rec (as we discussed at Kernel
> > Recipes). I can make the groups all sorted in doing so, thus we can
> > load them already sorted if that's needed, without doing anything special.
> >
>
> Good! If you keep them sorted and store the number of entries in a variable, we
> can have things ready for a future "optimized" version.
>
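
For reference, a rough user-space sketch of the two lookup strategies compared
above: walking each group, checking its ip range and doing a bsearch inside it,
versus one bsearch over a single, globally sorted vector of all entries. The
struct and field names are simplified stand-ins for the real ftrace data
structures, and the lookup is reduced to an exact ip match:

#include <stdlib.h>

struct rec {
	unsigned long ip;
};

struct group {
	struct rec *recs;	/* entries sorted by ip within the group */
	unsigned long nr;
};

static int cmp_ip(const void *key, const void *elt)
{
	unsigned long ip = *(const unsigned long *)key;
	const struct rec *r = elt;

	return (ip > r->ip) - (ip < r->ip);
}

/* Current scheme: walk the groups, bsearch inside the one whose range matches. */
struct rec *find_per_group(struct group *groups, unsigned long ngroups,
			   unsigned long ip)
{
	for (unsigned long i = 0; i < ngroups; i++) {
		struct group *g = &groups[i];

		if (!g->nr || ip < g->recs[0].ip || ip > g->recs[g->nr - 1].ip)
			continue;
		return bsearch(&ip, g->recs, g->nr, sizeof(*g->recs), cmp_ip);
	}
	return NULL;
}

/* Proposed scheme: one bsearch over a single, globally sorted vector. */
struct rec *find_flat(struct rec *recs, unsigned long total, unsigned long ip)
{
	return bsearch(&ip, recs, total, sizeof(*recs), cmp_ip);
}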

You mean something like this?

-- Steve


From 9b85c08d796dc953cf9b9331d1aed4c3052b09b9 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Date: Tue, 1 Oct 2019 14:38:07 -0400
Subject: [PATCH] ftrace: Add information on number of page groups allocated

When looking for ways to shrink the size of the dyn_ftrace structure, it is
useful to know how many pages are allocated for those structures and how many
groups those pages are allocated in, in order to work out the best ways to
save memory.

This adds an info printout of how many pages and how many groups of pages were
used to allocate the dyn_ftrace structures, and also shows those numbers in
dyn_ftrace_total_info (which is used for debugging).

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 kernel/trace/ftrace.c | 14 ++++++++++++++
 kernel/trace/trace.c  | 27 +++++++++++++++++++++------
 kernel/trace/trace.h  |  2 ++
 3 files changed, 37 insertions(+), 6 deletions(-)
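
A side note on the tracing_read_dyn_info() change below: it sizes the output
with snprintf(NULL, 0, ...) before allocating the buffer, then fills the
buffer with a bounded print. A minimal user-space sketch of that pattern (the
variable names and values here are illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long tot_cnt = 40000, pages = 600, groups = 20;
	char *buf;
	int r;

	/* Ask how many characters the output would need (excluding the NUL). */
	r = snprintf(NULL, 0, "%lu pages:%lu groups: %lu\n",
		     tot_cnt, pages, groups);
	r++;				/* room for the terminating NUL */

	buf = malloc(r);
	if (!buf)
		return 1;

	/* Now do the real, bounded print into the allocated buffer. */
	r = snprintf(buf, r, "%lu pages:%lu groups: %lu\n",
		     tot_cnt, pages, groups);
	fwrite(buf, 1, r, stdout);

	free(buf);
	return 0;
}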

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c4cc048eb594..244e2d9083a6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2860,6 +2860,8 @@ static void ftrace_shutdown_sysctl(void)

static u64 ftrace_update_time;
unsigned long ftrace_update_tot_cnt;
+unsigned long ftrace_number_of_pages;
+unsigned long ftrace_number_of_groups;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
@@ -2984,6 +2986,9 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
 		goto again;
 	}
 
+	ftrace_number_of_pages += 1 << order;
+	ftrace_number_of_groups++;
+
 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
 	pg->size = cnt;

@@ -3039,6 +3044,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
 		start_pg = pg->next;
 		kfree(pg);
 		pg = start_pg;
+		ftrace_number_of_pages -= 1 << order;
+		ftrace_number_of_groups--;
 	}
 	pr_info("ftrace: FAILED to allocate memory for functions\n");
 	return NULL;
@@ -5788,6 +5795,8 @@ void ftrace_release_mod(struct module *mod)
 		free_pages((unsigned long)pg->records, order);
 		tmp_page = pg->next;
 		kfree(pg);
+		ftrace_number_of_pages -= 1 << order;
+		ftrace_number_of_groups--;
 	}
 }

@@ -6129,6 +6138,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 			*last_pg = pg->next;
 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
 			free_pages((unsigned long)pg->records, order);
+			ftrace_number_of_pages -= 1 << order;
+			ftrace_number_of_groups--;
 			kfree(pg);
 			pg = container_of(last_pg, struct ftrace_page, next);
 			if (!(*last_pg))
@@ -6184,6 +6195,9 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
+	pr_info("ftrace: allocated %ld pages with %ld groups\n",
+		ftrace_number_of_pages, ftrace_number_of_groups);
+
 	set_ftrace_early_filters();
 
 	return;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e917aa783675..bb41a70c5189 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7548,14 +7548,29 @@ static ssize_t
 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
-	unsigned long *p = filp->private_data;
-	char buf[64]; /* Not too big for a shallow stack */
+	ssize_t ret;
+	char *buf;
 	int r;
 
-	r = scnprintf(buf, 63, "%ld", *p);
-	buf[r++] = '\n';
+	/* Size the output first, then allocate a buffer to hold it. */
+	r = snprintf(NULL, 0, "%ld pages:%ld groups: %ld\n",
+		     ftrace_update_tot_cnt,
+		     ftrace_number_of_pages,
+		     ftrace_number_of_groups);
+	r++;
+	buf = kmalloc(r, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	r = scnprintf(buf, r, "%ld pages:%ld groups: %ld\n",
+		      ftrace_update_tot_cnt,
+		      ftrace_number_of_pages,
+		      ftrace_number_of_groups);
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+	kfree(buf);
+
+	return ret;
 }
 
 static const struct file_operations tracing_dyn_info_fops = {
@@ -8747,7 +8762,7 @@ static __init int tracer_init_tracefs(void)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
-			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
+			NULL, &tracing_dyn_info_fops);
 #endif
 
 	create_trace_instances(d_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f801d154ff6a..f6f92a6c5cec 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -802,6 +802,8 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
+extern unsigned long ftrace_number_of_pages;
+extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
--
2.20.1
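
For completeness, a small user-space sketch of the accounting arithmetic the
patch introduces: each successful allocation of 2^order pages adds (1 << order)
to ftrace_number_of_pages and one to ftrace_number_of_groups, and gives room
for (PAGE_SIZE << order) / ENTRY_SIZE dyn_ftrace records. The PAGE_SIZE and
ENTRY_SIZE values below are illustrative assumptions, not the kernel's actual
values:

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* typical x86 page size */
#define ENTRY_SIZE	24UL	/* assumed size of a dyn_ftrace record */

static unsigned long ftrace_number_of_pages;
static unsigned long ftrace_number_of_groups;

/* Mimic one successful allocation of 2^order pages for dyn_ftrace records. */
static unsigned long allocate_group(unsigned int order)
{
	unsigned long cnt = (PAGE_SIZE << order) / ENTRY_SIZE;

	ftrace_number_of_pages += 1UL << order;
	ftrace_number_of_groups++;

	return cnt;	/* records this group can hold */
}

int main(void)
{
	unsigned int orders[] = { 4, 4, 2 };	/* e.g. three groups */
	unsigned long total = 0;

	for (unsigned int i = 0; i < sizeof(orders) / sizeof(orders[0]); i++)
		total += allocate_group(orders[i]);

	printf("%lu records in %lu pages across %lu groups\n",
	       total, ftrace_number_of_pages, ftrace_number_of_groups);
	return 0;
}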