author    Jon Paul Maloy <jon.maloy@ericsson.com>  2015-07-31 01:24:24 +0300
committer David S. Miller <davem@davemloft.net>    2015-07-31 03:25:14 +0300
commit    23d8335d786472021b5c733f228c7074208dcfa0 (patch)
tree      c7bcdc08b5567835c5a38f0d93cc46c7e8a55fea /net/tipc/link.c
parent    598411d70f85dcf5b5c6c2369cc48637c251b656 (diff)
tipc: remove implicit message delivery in node_unlock()
After the most recent changes, all access calls to a link which may entail addition of messages to the link's input queue are followed by an explicit call to tipc_sk_rcv(), using a reference to the correct queue.

This means that the potentially hazardous implicit delivery, using tipc_node_unlock() in combination with a binary flag and a cached queue pointer, has now become redundant.

This commit removes this implicit delivery mechanism, both for regular data messages and for binding table update messages.

Tested-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
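To make the before/after control flow concrete, below is a minimal, self-contained userspace sketch of the two delivery styles. Every identifier in it (msg_queue, node, deliver, node_unlock_implicit, rcv_path_explicit) is a hypothetical stand-in rather than TIPC code: in the kernel the queue is an sk_buff_head, the implicit path was tipc_node_unlock() acting on the TIPC_MSG_EVT flag and a cached queue pointer, and the explicit follow-up call is tipc_sk_rcv().

/*
 * Hypothetical model of the change; all names are illustrative stand-ins,
 * not TIPC kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct msg_queue {
	int pending;			/* stand-in for an sk_buff_head */
};

struct node {
	bool msg_evt;			/* old: TIPC_MSG_EVT-style flag */
	struct msg_queue *inputq;	/* old: cached queue pointer */
};

static void deliver(struct msg_queue *q)	/* stand-in for tipc_sk_rcv() */
{
	printf("delivering %d message(s)\n", q->pending);
	q->pending = 0;
}

/* Old scheme: delivery happens as a hidden side effect of dropping the
 * node lock, driven by a flag and a queue pointer cached on the node. */
static void node_unlock_implicit(struct node *n)
{
	if (n->msg_evt && n->inputq) {
		n->msg_evt = false;
		deliver(n->inputq);
	}
}

/* New scheme: whoever filled the queue delivers it explicitly, with a
 * direct reference to the correct queue and no shared node state. */
static void rcv_path_explicit(struct msg_queue *inputq)
{
	inputq->pending++;		/* link code enqueued a message */
	deliver(inputq);		/* explicit follow-up call */
}

int main(void)
{
	struct msg_queue q = { .pending = 1 };
	struct node n = { .msg_evt = true, .inputq = &q };

	node_unlock_implicit(&n);	/* implicit delivery (removed) */
	rcv_path_explicit(&q);		/* explicit delivery (kept) */
	return 0;
}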
Diffstat (limited to 'net/tipc/link.c')
-rw-r--r--  net/tipc/link.c | 21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 3a92924711a1..2aa19de715f6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -559,8 +559,6 @@ void link_prepare_wakeup(struct tipc_link *l)
 			break;
 		skb_unlink(skb, &l->wakeupq);
 		skb_queue_tail(l->inputq, skb);
-		l->owner->inputq = l->inputq;
-		l->owner->action_flags |= TIPC_MSG_EVT;
 	}
 }
@@ -598,8 +596,6 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
 void tipc_link_reset(struct tipc_link *l)
 {
-	struct tipc_node *owner = l->owner;
-
 	tipc_link_fsm_evt(l, LINK_RESET_EVT);
 	/* Link is down, accept any session */
@@ -611,14 +607,10 @@ void tipc_link_reset(struct tipc_link *l)
 	/* Prepare for renewed mtu size negotiation */
 	l->mtu = l->advertised_mtu;
-	/* Clean up all queues, except inputq: */
+	/* Clean up all queues: */
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
-	if (!owner->inputq)
-		owner->inputq = l->inputq;
-	skb_queue_splice_init(&l->wakeupq, owner->inputq);
-	if (!skb_queue_empty(owner->inputq))
-		owner->action_flags |= TIPC_MSG_EVT;
+	skb_queue_splice_init(&l->wakeupq, l->inputq);
 	tipc_link_purge_backlog(l);
 	kfree_skb(l->reasm_buf);
@@ -972,7 +964,6 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
 {
 	struct tipc_node *node = link->owner;
 	struct tipc_msg *msg = buf_msg(skb);
-	u32 dport = msg_destport(msg);
 	switch (msg_user(msg)) {
 	case TIPC_LOW_IMPORTANCE:
@@ -980,17 +971,11 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
 	case TIPC_HIGH_IMPORTANCE:
 	case TIPC_CRITICAL_IMPORTANCE:
 	case CONN_MANAGER:
-		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
-			node->inputq = link->inputq;
-			node->action_flags |= TIPC_MSG_EVT;
-		}
+		skb_queue_tail(link->inputq, skb);
 		return true;
 	case NAME_DISTRIBUTOR:
 		node->bclink.recv_permitted = true;
-		node->namedq = link->namedq;
 		skb_queue_tail(link->namedq, skb);
-		if (skb_queue_len(link->namedq) == 1)
-			node->action_flags |= TIPC_NAMED_MSG_EVT;
 		return true;
 	case MSG_BUNDLER:
 	case TUNNEL_PROTOCOL: