Subject: [PATCH 3.16 210/217] tun, bpf: fix suspicious RCU usage in tun_{attach, detach}_filter

3.16.35-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Daniel Borkmann <daniel@iogearbox.net>

[ Upstream commit 5a5abb1fa3b05dd6aa821525832644c1e7d2905f ]

Sasha Levin reported a suspicious rcu_dereference_protected() warning
found while fuzzing with trinity that is similar to this one:

[ 52.765684] net/core/filter.c:2262 suspicious rcu_dereference_protected() usage!
[ 52.765688] other info that might help us debug this:
[ 52.765695] rcu_scheduler_active = 1, debug_locks = 1
[ 52.765701] 1 lock held by a.out/1525:
[ 52.765704] #0: (rtnl_mutex){+.+.+.}, at: [<ffffffff816a64b7>] rtnl_lock+0x17/0x20
[ 52.765721] stack backtrace:
[ 52.765728] CPU: 1 PID: 1525 Comm: a.out Not tainted 4.5.0+ #264
[...]
[ 52.765768] Call Trace:
[ 52.765775] [<ffffffff813e488d>] dump_stack+0x85/0xc8
[ 52.765784] [<ffffffff810f2fa5>] lockdep_rcu_suspicious+0xd5/0x110
[ 52.765792] [<ffffffff816afdc2>] sk_detach_filter+0x82/0x90
[ 52.765801] [<ffffffffa0883425>] tun_detach_filter+0x35/0x90 [tun]
[ 52.765810] [<ffffffffa0884ed4>] __tun_chr_ioctl+0x354/0x1130 [tun]
[ 52.765818] [<ffffffff8136fed0>] ? selinux_file_ioctl+0x130/0x210
[ 52.765827] [<ffffffffa0885ce3>] tun_chr_ioctl+0x13/0x20 [tun]
[ 52.765834] [<ffffffff81260ea6>] do_vfs_ioctl+0x96/0x690
[ 52.765843] [<ffffffff81364af3>] ? security_file_ioctl+0x43/0x60
[ 52.765850] [<ffffffff81261519>] SyS_ioctl+0x79/0x90
[ 52.765858] [<ffffffff81003ba2>] do_syscall_64+0x62/0x140
[ 52.765866] [<ffffffff817d563f>] entry_SYSCALL64_slow_path+0x25/0x25

The same splat can be triggered with PROVE_RCU (+ PROVE_RCU_REPEATEDLY)
enabled from tun_attach_filter() when user space calls ioctl(tun_fd,
TUN{ATTACH,DETACH}FILTER, ...) to add or remove a BPF filter on tap
devices.
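
For reference, the user-space trigger path looks roughly like the
sketch below (not part of the patch): it creates a tap device and
attaches/detaches a trivial "accept everything" classic BPF filter
through those two ioctls. The interface name "tap0" and the no-op
filter are placeholders, and running it needs CAP_NET_ADMIN.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <linux/filter.h>

int main(void)
{
	/* Classic BPF program: a single "return whole packet" instruction. */
	struct sock_filter accept_all[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
	};
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= accept_all,
	};
	struct ifreq ifr;
	int fd;

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0) {
		perror("open(/dev/net/tun)");
		return 1;
	}

	/* Create (or attach to) a tap device. */
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		perror("TUNSETIFF");
		return 1;
	}

	/* This ioctl ends up in tun_attach_filter()... */
	if (ioctl(fd, TUNATTACHFILTER, &fprog) < 0)
		perror("TUNATTACHFILTER");

	/* ...and this one in tun_detach_filter(). */
	if (ioctl(fd, TUNDETACHFILTER, &fprog) < 0)
		perror("TUNDETACHFILTER");

	close(fd);
	return 0;
}

Both ioctls take the RTNL lock in __tun_chr_ioctl() before reaching
the attach/detach helpers, which is why the socket-lock based check
discussed next fires as a false positive.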

Since the fix in f91ff5b9ff52 ("net: sk_{detach|attach}_filter() rcu
fixes"), sk_attach_filter()/sk_detach_filter() dereference the filter
with rcu_dereference_protected(), checking whether the socket lock is
held in the control path.

Since its introduction in 994051625981 ("tun: socket filter support"),
tap filters have been managed under the RTNL lock from
__tun_chr_ioctl(). Thus the sock_owned_by_user(sk) check doesn't apply
in this specific case and therefore triggers the false positive.

Extend the BPF API with a __sk_attach_filter()/__sk_detach_filter()
pair that is used by the tap filter code, and pass in
lockdep_rtnl_is_held() for the rcu_dereference_protected() checks
instead.
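
The shape of that change can be illustrated with a small user-space
analogy (a sketch only: the booleans, the assert standing in for the
lockdep check, and the names attach_filter()/__attach_filter() are
invented here to mirror sk_attach_filter()/__sk_attach_filter()). The
unchanged entry point keeps the historical socket-lock check as its
default, while the double-underscore variant lets a caller that
serializes on a different lock, such as RTNL, pass its own "lock is
held" condition down to the dereference check:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool socket_lock_held;	/* stands in for sock_owned_by_user(sk) */
static bool rtnl_held;		/* stands in for lockdep_rtnl_is_held() */
static int *filter_ptr;		/* stands in for sk->sk_filter */

/* Like __sk_attach_filter(): the caller supplies the "lock is held"
 * condition, mirroring rcu_dereference_protected(sk->sk_filter, locked). */
static void __attach_filter(int *new_filter, bool locked)
{
	assert(locked);		/* lockdep would splat here otherwise */
	filter_ptr = new_filter;
}

/* Like sk_attach_filter(): keeps the historical default check. */
static void attach_filter(int *new_filter)
{
	__attach_filter(new_filter, socket_lock_held);
}

int main(void)
{
	static int prog = 1;

	/* tun path: serialized by RTNL, so the socket-lock check would be
	 * a false positive; pass the RTNL condition instead. */
	rtnl_held = true;
	__attach_filter(&prog, rtnl_held);
	rtnl_held = false;

	/* Ordinary socket path: "socket lock" held, old API suffices. */
	socket_lock_held = true;
	attach_filter(&prog);
	socket_lock_held = false;

	printf("attached filter %d\n", *filter_ptr);
	return 0;
}

In the kernel, that condition is consumed by
rcu_dereference_protected(), which splats under PROVE_RCU whenever the
condition evaluates to false, as in the trace above.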

Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
[bwh: Backported to 3.16:
- Drop changes to sk_attach_bpf(), __sk_attach_prog()
- Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -553,7 +553,8 @@ static int tun_attach(struct tun_struct
 
 	/* Re-attach the filter to persist device */
 	if (!skip_filter && (tun->filter_attached == true)) {
-		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+					 lockdep_rtnl_is_held());
 		if (!err)
 			goto out;
 	}
@@ -1792,7 +1793,7 @@ static void tun_detach_filter(struct tun
 
 	for (i = 0; i < n; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
-		sk_detach_filter(tfile->socket.sk);
+		__sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
 	}
 
 	tun->filter_attached = false;
@@ -1805,7 +1806,8 @@ static int tun_attach_filter(struct tun_
 
 	for (i = 0; i < tun->numqueues; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
-		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+					 lockdep_rtnl_is_held());
 		if (ret) {
 			tun_detach_filter(tun, i);
 			return ret;
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -359,7 +359,10 @@ int sk_unattached_filter_create(struct s
 void sk_unattached_filter_destroy(struct sk_filter *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+		       bool locked);
 int sk_detach_filter(struct sock *sk);
+int __sk_detach_filter(struct sock *sk, bool locked);
 
 int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1590,7 +1590,8 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_d
  * occurs or there is insufficient memory for the filter a negative
  * errno code is returned. On success the return is zero.
  */
-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+		       bool locked)
 {
 	struct sk_filter *fp, *old_fp;
 	unsigned int fsize = sk_filter_proglen(fprog);
@@ -1629,8 +1630,7 @@ int sk_attach_filter(struct sock_fprog *
 	if (IS_ERR(fp))
 		return PTR_ERR(fp);
 
-	old_fp = rcu_dereference_protected(sk->sk_filter,
-					   sock_owned_by_user(sk));
+	old_fp = rcu_dereference_protected(sk->sk_filter, locked);
 	rcu_assign_pointer(sk->sk_filter, fp);
 
 	if (old_fp)
@@ -1638,9 +1638,14 @@ int sk_attach_filter(struct sock_fprog *
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(sk_attach_filter);
+EXPORT_SYMBOL_GPL(__sk_attach_filter);
 
-int sk_detach_filter(struct sock *sk)
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+{
+	return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
+}
+
+int __sk_detach_filter(struct sock *sk, bool locked)
 {
 	int ret = -ENOENT;
 	struct sk_filter *filter;
@@ -1648,8 +1653,7 @@ int sk_detach_filter(struct sock *sk)
 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
 		return -EPERM;
 
-	filter = rcu_dereference_protected(sk->sk_filter,
-					   sock_owned_by_user(sk));
+	filter = rcu_dereference_protected(sk->sk_filter, locked);
 	if (filter) {
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
 		sk_filter_uncharge(sk, filter);
@@ -1658,7 +1662,12 @@ int sk_detach_filter(struct sock *sk)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(sk_detach_filter);
+EXPORT_SYMBOL_GPL(__sk_detach_filter);
+
+int sk_detach_filter(struct sock *sk)
+{
+	return __sk_detach_filter(sk, sock_owned_by_user(sk));
+}
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 		  unsigned int len)