Date:    Thu, 11 Aug 2022 14:19:53 -0400
Subject: Re: [PATCH v2] locking: Add __lockfunc to slow path functions
From:    Waiman Long <>
On 8/10/22 18:03, Namhyung Kim wrote:
> So that we can skip the functions in the perf lock contention and other
> places like /proc/PID/wchan.
>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
> * annotate pv qspinlock functions too
>
>  arch/x86/include/asm/qspinlock_paravirt.h | 13 +++++++------
>  kernel/locking/qrwlock.c                  |  4 ++--
>  kernel/locking/qspinlock.c                |  2 +-
>  kernel/locking/qspinlock_paravirt.h       |  4 ++--
>  4 files changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
> index 892fd8c3a6f7..60ece592b220 100644
> --- a/arch/x86/include/asm/qspinlock_paravirt.h
> +++ b/arch/x86/include/asm/qspinlock_paravirt.h
> @@ -12,7 +12,7 @@
>   */
>  #ifdef CONFIG_64BIT
>
> -PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
> +__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
>  #define __pv_queued_spin_unlock	__pv_queued_spin_unlock
>  #define PV_UNLOCK		"__raw_callee_save___pv_queued_spin_unlock"
>  #define PV_UNLOCK_SLOWPATH	"__raw_callee_save___pv_queued_spin_unlock_slowpath"
> @@ -20,9 +20,10 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
>  /*
>   * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
>   * which combines the registers saving trunk and the body of the following
> - * C code:
> + * C code.  Note that it puts the code in the .spinlock.text section which
> + * is equivalent to adding __lockfunc in the C code:
>   *
> - * void __pv_queued_spin_unlock(struct qspinlock *lock)
> + * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
>   * {
>   *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
>   *
> @@ -36,7 +37,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
>   *   rsi = lockval           (second argument)
>   *   rdx = internal variable (set to 0)
>   */
> -asm    (".pushsection .text;"
> +asm    (".pushsection .spinlock.text;"
>         ".globl " PV_UNLOCK ";"
>         ".type " PV_UNLOCK ", @function;"
>         ".align 4,0x90;"
> @@ -65,8 +66,8 @@ asm    (".pushsection .text;"
>
>  #else /* CONFIG_64BIT */
>
> -extern void __pv_queued_spin_unlock(struct qspinlock *lock);
> -PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
> +extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
> +__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");
>
>  #endif /* CONFIG_64BIT */
>  #endif
> diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
> index 2e1600906c9f..d2ef312a8611 100644
> --- a/kernel/locking/qrwlock.c
> +++ b/kernel/locking/qrwlock.c
> @@ -18,7 +18,7 @@
>   * queued_read_lock_slowpath - acquire read lock of a queued rwlock
>   * @lock: Pointer to queued rwlock structure
>   */
> -void queued_read_lock_slowpath(struct qrwlock *lock)
> +void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
>  {
>  	/*
>  	 * Readers come here when they cannot get the lock without waiting
> @@ -63,7 +63,7 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
>   * queued_write_lock_slowpath - acquire write lock of a queued rwlock
>   * @lock : Pointer to queued rwlock structure
>   */
> -void queued_write_lock_slowpath(struct qrwlock *lock)
> +void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
>  {
>  	int cnts;
>
> diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
> index 65a9a10caa6f..2b23378775fe 100644
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -313,7 +313,7 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
>   * contended  :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
>   *   queue    :         ^--'                             :
>   */
> -void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> +void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
>  {
>  	struct mcs_spinlock *prev, *next, *node;
>  	u32 old, tail;
> diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
> index e84d21aa0722..6afc249ce697 100644
> --- a/kernel/locking/qspinlock_paravirt.h
> +++ b/kernel/locking/qspinlock_paravirt.h
> @@ -489,7 +489,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
>   * PV versions of the unlock fastpath and slowpath functions to be used
>   * instead of queued_spin_unlock().
>   */
> -__visible void
> +__visible __lockfunc void
>  __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
>  {
>  	struct pv_node *node;
> @@ -544,7 +544,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
>  #include <asm/qspinlock_paravirt.h>
>
>  #ifndef __pv_queued_spin_unlock
> -__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
> +__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
>  {
>  	u8 locked;

Acked-by: Waiman Long <longman@redhat.com>
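
For background on why the annotation has the described effect: __lockfunc is the
annotation that places a function in the .spinlock.text section, and code such
as /proc/PID/wchan recognizes (and skips) that section through
in_lock_functions(). The sketch below is only a rough rendering of the relevant
definitions (based on include/linux/spinlock.h and kernel/locking/spinlock.c);
exact spellings vary across kernel versions:

/* Sketch only -- see the kernel tree for the authoritative definitions. */

/* include/linux/spinlock.h (roughly): __lockfunc tags a function with the
 * .spinlock.text section, which is what this patch adds to the slow paths. */
#define __lockfunc	__section(".spinlock.text")

/* kernel/locking/spinlock.c (roughly): wchan and similar users call this to
 * test whether an address falls inside the lock-function section bounded by
 * the linker symbols __lock_text_start/__lock_text_end. */
notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}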