Subject: [GIT PULL] tracing: A few fixes for this rc release

Linus,

This includes three fixes:

- Fix a deadlock introduced by a previous fix that keeps module loading
and function tracing text modifications from stepping on each other
(a few of these patches also add comments documenting the issue;
a short sketch of where text_mutex now lives follows this list)

- Fix a crash when the snapshot buffer gets out of sync with the
main ring buffer.

- Fix a memory leak when reading the tracing error log
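
For a quick picture of what the locking fix changes, here is a minimal
user-space sketch (the function names mirror the kernel ones, but the
pthread mutex and the main() driver are purely illustrative, not kernel
code): text_mutex is now taken and released inside the x86
ftrace_arch_code_modify_prepare()/ftrace_arch_code_modify_post_process()
hooks themselves, rather than around them in the generic
ftrace_run_update_code(). The real change is the first diff below; this
sketch only shows where the lock now lives.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the kernel's text_mutex (illustration only). */
static pthread_mutex_t text_mutex = PTHREAD_MUTEX_INITIALIZER;

/* After this series, the x86 prepare hook takes text_mutex itself... */
static int ftrace_arch_code_modify_prepare(void)
{
	pthread_mutex_lock(&text_mutex);
	/* set_kernel_text_rw() / set_all_modules_text_rw() run here */
	return 0;
}

/* ...and the matching post_process hook releases it. */
static int ftrace_arch_code_modify_post_process(void)
{
	/* set_all_modules_text_ro() / set_kernel_text_ro() run here */
	pthread_mutex_unlock(&text_mutex);
	return 0;
}

/* The generic code no longer wraps the hooks in text_mutex on its own. */
static void ftrace_run_update_code(void)
{
	if (ftrace_arch_code_modify_prepare())
		return;
	/* arch_ftrace_update_code() would rewrite the kernel text here */
	ftrace_arch_code_modify_post_process();
}

int main(void)
{
	ftrace_run_update_code();
	printf("text_mutex is only held inside the arch hooks\n");
	return 0;
}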


Please pull the latest trace-v5.2-rc5 tree, which can be found at:


  git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git
trace-v5.2-rc5

Tag SHA1: e2d01e8f26e849e0e45742a46727ba0da8af7c55
Head SHA1: 074376ac0e1d1fcd4fafebca86ee6158e7c20680


Eiichi Tsukata (1):
      tracing/snapshot: Resize spare buffer if size changed

Jiri Kosina (1):
      ftrace/x86: Anotate text_mutex split between ftrace_arch_code_modify_post_process() and ftrace_arch_code_modify_prepare()

Petr Mladek (1):
      ftrace/x86: Remove possible deadlock between register_kprobe() and ftrace_run_update_code()

Steven Rostedt (VMware) (1):
      ftrace/x86: Add a comment to why we take text_mutex in ftrace_arch_code_modify_prepare()

Takeshi Misawa (1):
      tracing: Fix memory leak in tracing_err_log_open()

----
 arch/x86/kernel/ftrace.c | 10 ++++++++++
 kernel/trace/ftrace.c    | 10 +---------
 kernel/trace/trace.c     | 24 +++++++++++++++++++-----
 3 files changed, 30 insertions(+), 14 deletions(-)
---------------------------
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0927bb158ffc..76228525acd0 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 
 #include <trace/syscall.h>
 
@@ -34,16 +35,25 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int ftrace_arch_code_modify_prepare(void)
+    __acquires(&text_mutex)
 {
+	/*
+	 * Need to grab text_mutex to prevent a race from module loading
+	 * and live kernel patching from changing the text permissions while
+	 * ftrace has it set to "read/write".
+	 */
+	mutex_lock(&text_mutex);
 	set_kernel_text_rw();
 	set_all_modules_text_rw();
 	return 0;
 }
 
 int ftrace_arch_code_modify_post_process(void)
+    __releases(&text_mutex)
 {
 	set_all_modules_text_ro();
 	set_kernel_text_ro();
+	mutex_unlock(&text_mutex);
 	return 0;
 }
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 38277af44f5c..576c41644e77 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -34,7 +34,6 @@
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
 #include <linux/kprobes.h>
-#include <linux/memory.h>
 
 #include <trace/events/sched.h>
 
@@ -2611,12 +2610,10 @@ static void ftrace_run_update_code(int command)
 {
 	int ret;
 
-	mutex_lock(&text_mutex);
-
 	ret = ftrace_arch_code_modify_prepare();
 	FTRACE_WARN_ON(ret);
 	if (ret)
-		goto out_unlock;
+		return;
 
 	/*
 	 * By default we use stop_machine() to modify the code.
@@ -2628,9 +2625,6 @@ static void ftrace_run_update_code(int command)
 
 	ret = ftrace_arch_code_modify_post_process();
 	FTRACE_WARN_ON(ret);
-
-out_unlock:
-	mutex_unlock(&text_mutex);
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -5784,7 +5778,6 @@ void ftrace_module_enable(struct module *mod)
 	struct ftrace_page *pg;
 
 	mutex_lock(&ftrace_lock);
-	mutex_lock(&text_mutex);
 
 	if (ftrace_disabled)
 		goto out_unlock;
@@ -5846,7 +5839,6 @@ void ftrace_module_enable(struct module *mod)
 	ftrace_arch_code_modify_post_process();
 
 out_unlock:
-	mutex_unlock(&text_mutex);
 	mutex_unlock(&ftrace_lock);
 
 	process_cached_mods(mod->name);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 83e08b78dbee..c3aabb576fe5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6719,11 +6719,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 			break;
 		}
 #endif
-		if (!tr->allocated_snapshot) {
+		if (tr->allocated_snapshot)
+			ret = resize_buffer_duplicate_size(&tr->max_buffer,
+					&tr->trace_buffer, iter->cpu_file);
+		else
 			ret = tracing_alloc_snapshot_instance(tr);
-			if (ret < 0)
-				break;
-		}
+		if (ret < 0)
+			break;
 		local_irq_disable();
 		/* Now, we're going to swap */
 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
@@ -7126,12 +7128,24 @@ static ssize_t tracing_err_log_write(struct file *file,
 	return count;
 }
 
+static int tracing_err_log_release(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+
+	if (file->f_mode & FMODE_READ)
+		seq_release(inode, file);
+
+	return 0;
+}
+
 static const struct file_operations tracing_err_log_fops = {
 	.open		= tracing_err_log_open,
 	.write		= tracing_err_log_write,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= tracing_release_generic_tr,
+	.release	= tracing_err_log_release,
 };
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)