Subject: [RFC 2/3] futex: exit: Print a warning when futex_cleanup fails
The futex_cleanup routines currently fail silently.

Allow the futex_cleanup routines to fail more verbosely so we can
leave a message behind for easier debugging and error detection.
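
For illustration only (not part of this patch), a minimal userspace
sketch that registers a deliberately bogus robust list head, so that the
exit-time walk in exit_robust_list() fails and the new message is
printed. The 0xdead000 address is an assumption and is expected to be
unmapped in the test process:

  #include <linux/futex.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
  	/* Bogus (assumed unmapped) address for the robust list head. */
  	struct robust_list_head *head =
  			(struct robust_list_head *)0xdead000;

  	/* set_robust_list() does not validate the pointer, so this succeeds. */
  	if (syscall(SYS_set_robust_list, head, sizeof(*head)) != 0) {
  		perror("set_robust_list");
  		return 1;
  	}

  	/*
  	 * On exit, futex_cleanup() calls exit_robust_list(), which fails
  	 * to fetch head->list.next and, with this patch, leaves a message
  	 * in the kernel log instead of returning silently.
  	 */
  	return 0;
  }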

Fixes: 0771dfefc9e5 ("[PATCH] lightweight robust futexes: core")
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "André Almeida" <andrealmeid@collabora.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Nico Pache <npache@redhat.com>
---
kernel/futex/core.c | 44 ++++++++++++++++++++++++++------------------
1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index b22ef1efe751..8a62eb1b5d77 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -760,9 +760,9 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
- * We silently return on any sign of list-walking problem.
+ * We return false on any sign of list-walking problem.
*/
-static void exit_robust_list(struct task_struct *curr)
+static bool exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
@@ -771,23 +771,25 @@ static void exit_robust_list(struct task_struct *curr)
unsigned long futex_offset;
int rc;

+ if (!futex_cmpxchg_enabled)
+ return false;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (fetch_robust_entry(&entry, &head->list.next, &pi))
- return;
+ return false;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
- return;
+ return false;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
- return;
+ return false;

next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
@@ -803,10 +805,10 @@ static void exit_robust_list(struct task_struct *curr)
if (entry != pending) {
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi, HANDLE_DEATH_LIST))
- return;
+ return false;
}
if (rc)
- return;
+ return false;
entry = next_entry;
pi = next_pi;
/*
@@ -819,9 +821,10 @@ static void exit_robust_list(struct task_struct *curr)
}

if (pending) {
- handle_futex_death((void __user *)pending + futex_offset,
+ return handle_futex_death((void __user *)pending + futex_offset,
curr, pip, HANDLE_DEATH_PENDING);
}
+ return true;
}

#ifdef CONFIG_COMPAT
@@ -854,9 +857,9 @@ compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **ent
* Walk curr->robust_list (very carefully, it's a userspace list!)
* and mark any locks found there dead, and notify any waiters.
*
- * We silently return on any sign of list-walking problem.
+ * We return false on any sign of list-walking problem.
*/
-static void compat_exit_robust_list(struct task_struct *curr)
+static bool compat_exit_robust_list(struct task_struct *curr)
{
struct compat_robust_list_head __user *head = curr->compat_robust_list;
struct robust_list __user *entry, *next_entry, *pending;
@@ -866,24 +869,26 @@ static void compat_exit_robust_list(struct task_struct *curr)
compat_long_t futex_offset;
int rc;

+ if (!futex_cmpxchg_enabled)
+ return false;
/*
* Fetch the list head (which was registered earlier, via
* sys_set_robust_list()):
*/
if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
- return;
+ return false;
/*
* Fetch the relative futex offset:
*/
if (get_user(futex_offset, &head->futex_offset))
- return;
+ return false;
/*
* Fetch any possibly pending lock-add first, and handle it
* if it exists:
*/
if (compat_fetch_robust_entry(&upending, &pending,
&head->list_op_pending, &pip))
- return;
+ return false;

next_entry = NULL; /* avoid warning with gcc */
while (entry != (struct robust_list __user *) &head->list) {
@@ -902,10 +907,10 @@ static void compat_exit_robust_list(struct task_struct *curr)

if (handle_futex_death(uaddr, curr, pi,
HANDLE_DEATH_LIST))
- return;
+ return false;
}
if (rc)
- return;
+ return false;
uentry = next_uentry;
entry = next_entry;
pi = next_pi;
@@ -920,8 +925,9 @@ static void compat_exit_robust_list(struct task_struct *curr)
if (pending) {
void __user *uaddr = futex_uaddr(pending, futex_offset);

- handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
+ return handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
}
+ return true;
}
#endif

@@ -1007,13 +1013,15 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
static void futex_cleanup(struct task_struct *tsk)
{
if (unlikely(tsk->robust_list)) {
- exit_robust_list(tsk);
+ if (!exit_robust_list(tsk))
+ pr_info("futex: exit_robust_list failed\n");
tsk->robust_list = NULL;
}

#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
- compat_exit_robust_list(tsk);
+ if (!compat_exit_robust_list(tsk))
+ pr_info("futex: compat_exit_robust_list failed\n");
tsk->compat_robust_list = NULL;
}
#endif
--
2.35.1