author | Cong Wang <xiyou.wangcong@gmail.com> | 2018-12-12 08:43:51 +0300
committer | David S. Miller <davem@davemloft.net> | 2018-12-15 02:48:49 +0300
commit | 143ece654f9f5b37bedea252a990be37e48ae3a5 (patch)
tree | 3268762e6be9aa0bbb5a1a6de35e77d67e08175b /net
parent | 65cab850f0eeaa9180bd2e10a231964f33743edf (diff)
download | linux-143ece654f9f5b37bedea252a990be37e48ae3a5.tar.xz
tipc: check tsk->group in tipc_wait_for_cond()
tipc_wait_for_cond() drops socket lock before going to sleep,
but tsk->group could be freed right after that release_sock().
So we have to re-check and reload tsk->group after it wakes up.
After this patch, tipc_wait_for_cond() returns -ERESTARTSYS when
tsk->group is NULL, instead of continuing with the assumption of
a non-NULL tsk->group.
(It looks like 'dsts' should be re-checked and reloaded too, but
it is a different bug.)
The same re-check is applied in tipc_send_group_unicast() and
tipc_send_group_anycast().
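For illustration only, a minimal user-space sketch of the pattern (hypothetical names throughout; pthread code, not TIPC or kernel code): the buggy shape caches the pointer once and keeps dereferencing the stale copy after every wakeup, while the fixed shape re-reads and NULL-checks the shared pointer each time the lock is re-taken, mirroring the `tsk->group &&` checks this patch adds inside the wait conditions.

```c
/*
 * Illustration only -- a user-space analogue of the race, not TIPC code.
 * The mutex, the 'group' pointer and the helpers are hypothetical stand-ins
 * for the socket lock, tsk->group and tipc_group_cong().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct group { int members; };

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  sk_wake = PTHREAD_COND_INITIALIZER;
static struct group *group;        /* may be freed and NULLed by another thread */

static bool group_congested(const struct group *grp)
{
        return grp->members > 16;  /* stand-in for the real congestion test */
}

/* Buggy shape: the pointer is captured once and reused after every wakeup. */
static int wait_uncongested_buggy(void)
{
        pthread_mutex_lock(&sk_lock);
        struct group *grp = group;   /* becomes stale once the lock is dropped */

        while (group_congested(grp)) /* use-after-free if 'group' was freed */
                pthread_cond_wait(&sk_wake, &sk_lock); /* drops + re-takes lock */

        pthread_mutex_unlock(&sk_lock);
        return 0;
}

/* Fixed shape: re-read and NULL-check the shared pointer under the lock. */
static int wait_uncongested_fixed(void)
{
        int rc = 0;

        pthread_mutex_lock(&sk_lock);
        while (group && group_congested(group))
                pthread_cond_wait(&sk_wake, &sk_lock);

        if (!group)
                rc = -1;           /* the group vanished while we slept */
        pthread_mutex_unlock(&sk_lock);
        return rc;
}

int main(void)
{
        static struct group g = { .members = 1 };

        group = &g;                /* not congested, so no waiting needed */
        printf("fixed: rc = %d\n", wait_uncongested_fixed());
        (void)wait_uncongested_buggy; /* shown for contrast, not called */
        return 0;
}
```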
Reported-by: syzbot+10a9db47c3a0e13eb31c@syzkaller.appspotmail.com
Fixes: b7d42635517f ("tipc: introduce flow control for group broadcast messages")
Fixes: ee106d7f942d ("tipc: introduce group anycast messaging")
Fixes: 27bd9ec027f3 ("tipc: introduce group unicast messaging")
Cc: Ying Xue <ying.xue@windriver.com>
Cc: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/tipc/socket.c | 25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e1396fb87779..656940692a44 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -880,7 +880,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
         DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
         int blks = tsk_blocks(GROUP_H_SIZE + dlen);
         struct tipc_sock *tsk = tipc_sk(sk);
-        struct tipc_group *grp = tsk->group;
         struct net *net = sock_net(sk);
         struct tipc_member *mb = NULL;
         u32 node, port;
@@ -894,7 +893,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
         /* Block or return if destination link or member is congested */
         rc = tipc_wait_for_cond(sock, &timeout,
                                 !tipc_dest_find(&tsk->cong_links, node, 0) &&
-                                !tipc_group_cong(grp, node, port, blks, &mb));
+                                tsk->group &&
+                                !tipc_group_cong(tsk->group, node, port, blks,
+                                                 &mb));
         if (unlikely(rc))
                 return rc;

@@ -924,7 +925,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
         struct tipc_sock *tsk = tipc_sk(sk);
         struct list_head *cong_links = &tsk->cong_links;
         int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-        struct tipc_group *grp = tsk->group;
         struct tipc_msg *hdr = &tsk->phdr;
         struct tipc_member *first = NULL;
         struct tipc_member *mbr = NULL;
@@ -941,9 +941,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
         type = msg_nametype(hdr);
         inst = dest->addr.name.name.instance;
         scope = msg_lookup_scope(hdr);
-        exclude = tipc_group_exclude(grp);

         while (++lookups < 4) {
+                exclude = tipc_group_exclude(tsk->group);
+
                 first = NULL;

                 /* Look for a non-congested destination member, if any */
@@ -952,7 +953,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                                          &dstcnt, exclude, false))
                         return -EHOSTUNREACH;
                 tipc_dest_pop(&dsts, &node, &port);
-                cong = tipc_group_cong(grp, node, port, blks, &mbr);
+                cong = tipc_group_cong(tsk->group, node, port, blks,
+                                       &mbr);
                 if (!cong)
                         break;
                 if (mbr == first)
@@ -971,7 +973,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                 /* Block or return if destination link or member is congested */
                 rc = tipc_wait_for_cond(sock, &timeout,
                                         !tipc_dest_find(cong_links, node, 0) &&
-                                        !tipc_group_cong(grp, node, port,
+                                        tsk->group &&
+                                        !tipc_group_cong(tsk->group, node, port,
                                                          blks, &mbr));
                 if (unlikely(rc))
                         return rc;
@@ -1006,8 +1009,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
         struct sock *sk = sock->sk;
         struct net *net = sock_net(sk);
         struct tipc_sock *tsk = tipc_sk(sk);
-        struct tipc_group *grp = tsk->group;
-        struct tipc_nlist *dsts = tipc_group_dests(grp);
+        struct tipc_nlist *dsts = tipc_group_dests(tsk->group);
         struct tipc_mc_method *method = &tsk->mc_method;
         bool ack = method->mandatory && method->rcast;
         int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1020,8 +1022,9 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
                 return -EHOSTUNREACH;

         /* Block or return if any destination link or member is congested */
-        rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
-                                !tipc_group_bc_cong(grp, blks));
+        rc = tipc_wait_for_cond(sock, &timeout,
+                                !tsk->cong_link_cnt && tsk->group &&
+                                !tipc_group_bc_cong(tsk->group, blks));
         if (unlikely(rc))
                 return rc;

@@ -1036,7 +1039,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
         msg_set_hdr_sz(hdr, GROUP_H_SIZE);
         msg_set_destport(hdr, 0);
         msg_set_destnode(hdr, 0);
-        msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+        msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

         /* Avoid getting stuck with repeated forced replicasts */
         msg_set_grp_bc_ack_req(hdr, ack);
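Why moving the check into the condition is enough: tipc_wait_for_cond() takes the condition as an expression that is re-evaluated after the socket lock is re-taken on every wakeup, so only names that are re-read in that expression observe changes made while the lock was dropped. The sketch below makes that visible with an illustrative WAIT_FOR_COND() macro and stand-in lock helpers; none of these names are the kernel's real implementation, only a hedged analogue of the call-site change in this patch.

```c
/*
 * Illustrative stand-ins only: WAIT_FOR_COND(), my_lock()/my_unlock() and the
 * structs below are not the kernel's tipc_wait_for_cond() machinery.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static void my_lock(void)   { /* stands in for lock_sock() */ }
static void my_unlock(void) { /* stands in for release_sock() */ }
static void my_sleep(void)  { /* another thread may free the group here */ }

/*
 * The condition is a macro argument, so its text is re-evaluated under the
 * lock on every iteration -- but only names that are re-read each time
 * observe changes made while the lock was dropped.
 */
#define WAIT_FOR_COND(cond_, tries_)                          \
({                                                            \
        int rc_ = 0, left_ = (tries_);                        \
        while (!(cond_)) {                                    \
                if (!left_--) {                               \
                        rc_ = -EAGAIN;                        \
                        break;                                \
                }                                             \
                my_unlock();                                  \
                my_sleep();                                   \
                my_lock();                                    \
        }                                                     \
        rc_;                                                  \
})

struct group      { int congested; };
struct sock_state { struct group *group; };

static bool group_cong(const struct group *grp) { return grp->congested; }

int main(void)
{
        struct group g = { .congested = 1 };
        struct sock_state tsk = { .group = &g };
        int rc;

        my_lock();

        /*
         * Buggy shape: a local copied before the wait.  The macro still
         * re-evaluates the expression, but it keeps using the old pointer,
         * which may already be freed:
         *
         *         struct group *grp = tsk.group;
         *         rc = WAIT_FOR_COND(!group_cong(grp), 3);
         *
         * Fixed shape (what this patch switches the call sites to):
         * re-read and NULL-check the pointer inside the condition itself.
         */
        rc = WAIT_FOR_COND(tsk.group && !group_cong(tsk.group), 3);

        my_unlock();
        printf("rc = %d\n", rc);
        return 0;
}
```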