Subject: [patch 38/50] locking/mutex: Exclude non-ww_mutex API for RT
From: Thomas Gleixner <tglx@linutronix.de>

In order to build ww_mutex standalone on RT and to replace mutex with an
RT-specific rtmutex-based variant, guard the non-ww_mutex API so it is
only built when CONFIG_PREEMPT_RT is disabled.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
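As a quick overview for reviewers, the guards leave mutex.c roughly
structured as sketched below (a condensed sketch with bodies elided; not
the literal file contents):

	#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_PREEMPT_RT)
	void __sched mutex_lock(struct mutex *lock) { ... }
	EXPORT_SYMBOL(mutex_lock);
	#endif

	/* Built unconditionally; shared by mutex_unlock() and ww_mutex_unlock() */
	static __always_inline void __mutex_unlock(_mutex_t *lock) { ... }

	#ifndef CONFIG_PREEMPT_RT
	void __sched mutex_unlock(struct mutex *lock)
	{
		__mutex_unlock(lock);
	}
	EXPORT_SYMBOL(mutex_unlock);
	#endif /* !CONFIG_PREEMPT_RT */
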
kernel/locking/mutex.c | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
---
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -242,7 +242,7 @@ static void __mutex_handoff(_mutex_t *lo
}
}

-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_PREEMPT_RT)
/*
* We split the mutex lock/unlock logic into separate fastpath and
* slowpath functions, to reduce the register pressure on the fastpath.
@@ -280,7 +280,7 @@ void __sched mutex_lock(struct mutex *lo
__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
-#endif
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC && !CONFIG_PREEMPT_RT */

/*
* Wait-Die:
@@ -705,17 +705,27 @@ mutex_optimistic_spin(_mutex_t *lock, st

return false;
}
-#else
+#else /* CONFIG_MUTEX_SPIN_ON_OWNER */
static __always_inline bool
mutex_optimistic_spin(_mutex_t *lock, struct ww_acquire_ctx *ww_ctx,
struct mutex_waiter *waiter)
{
return false;
}
-#endif
+#endif /* !CONFIG_MUTEX_SPIN_ON_OWNER */

static noinline void __sched __mutex_unlock_slowpath(_mutex_t *lock, unsigned long ip);

+static __always_inline void __mutex_unlock(_mutex_t *lock)
+{
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+	if (__mutex_unlock_fast(lock))
+		return;
+#endif
+	__mutex_unlock_slowpath(lock, _RET_IP_);
+}
+
+#ifndef CONFIG_PREEMPT_RT
/**
* mutex_unlock - release the mutex
* @lock: the mutex to be released
@@ -729,13 +739,10 @@ static noinline void __sched __mutex_unl
*/
void __sched mutex_unlock(struct mutex *lock)
{
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
-	if (__mutex_unlock_fast(lock))
-		return;
-#endif
-	__mutex_unlock_slowpath(lock, _RET_IP_);
+	__mutex_unlock(lock);
}
EXPORT_SYMBOL(mutex_unlock);
+#endif /* !CONFIG_PREEMPT_RT */

/**
* ww_mutex_unlock - release the w/w mutex
@@ -763,7 +770,7 @@ void __sched ww_mutex_unlock(struct ww_m
lock->ctx = NULL;
}

-	mutex_unlock(&lock->base);
+	__mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

@@ -1093,12 +1100,14 @@ static __always_inline int __sched
return ret;
}

+#ifndef CONFIG_PREEMPT_RT
static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip)
{
return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}
+#endif /* !CONFIG_PREEMPT_RT */

static int __sched
__ww_mutex_lock(_mutex_t *lock, unsigned int state, unsigned int subclass,
@@ -1109,6 +1118,7 @@ static int __sched
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifndef CONFIG_PREEMPT_RT
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
@@ -1151,6 +1161,7 @@ mutex_lock_io_nested(struct mutex *lock,
io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+# endif /* !CONFIG_PREEMPT_RT */

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
@@ -1278,6 +1289,7 @@ static noinline void __sched __mutex_unl
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#ifndef CONFIG_PREEMPT_RT
/*
* Here come the less common (and hence less performance-critical) APIs:
* mutex_lock_interruptible() and mutex_trylock().
@@ -1372,6 +1384,7 @@ static noinline int __sched
{
return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
+#endif /* !CONFIG_PREEMPT_RT */

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
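
A note on the ww_mutex_unlock() hunk: since mutex_unlock() is no longer
built when CONFIG_PREEMPT_RT is enabled, ww_mutex_unlock() now releases
the base lock through the always-built __mutex_unlock() helper instead.
A condensed view (bodies elided; not the literal file contents):

	void __sched ww_mutex_unlock(struct ww_mutex *lock)
	{
		/* ... drop the acquire context ... */
		__mutex_unlock(&lock->base);	/* available on RT and !RT */
	}
	EXPORT_SYMBOL(ww_mutex_unlock);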