author	Jon Paul Maloy <jon.maloy@ericsson.com>	2015-10-22 15:51:47 +0300
committer	David S. Miller <davem@davemloft.net>	2015-10-24 16:56:46 +0300
commit	c49a0a84391bcc313b3dc2a9ceee6de684e07655 (patch)
tree	5f4c6686b23b073ff210628f666f1a73de8c6cd2 /net/tipc
parent	c72fa872a23f03b2b9c17e88f3b0a8070924e5f1 (diff)
tipc: ensure binding table initial distribution is sent via first link
Correct synchronization of the broadcast link at first contact between two nodes depends on the assumption that the binding table "bulk" update passes via the same link as the initial broadcast synchronization message, i.e., via the first link that is established. This is not guaranteed in the current implementation. If two links come up very close to each other in time, the "bulk" may well pass via the second link, and hence void the guarantee of a correct initial synchronization before the broadcast link is opened.

This commit makes two small changes to strengthen this guarantee.

1) We let the second established link occupy slot 1 of the "active_links" array, while the first link retains slot 0. (This is in reality a cosmetic change; we could just as well keep the current, opposite order.)

2) We let the name distributor always use link selector/slot 0 when it sends its binding table updates.

The extra traffic bias on the first link caused by this change should be negligible, since binding table updates constitute a very small fraction of the total traffic.

Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
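For readers unfamiliar with how the selector argument changed below is used: in tipc_node_xmit() the low bit of the selector indexes the two-entry active-link array, so passing selector 0 (as the patched name-distributor calls now do) always resolves to slot 0, which after this patch holds the first established link. The following stand-alone C sketch illustrates only that mapping; the struct and function names are hypothetical, not the kernel's.

#include <stdio.h>

/* Hypothetical sketch of the selector-to-slot mapping; names below are
 * illustrative only and do not appear in the kernel sources. */
struct link {
	const char *name;
};

struct node {
	struct link *active_links[2];	/* slot 0: first link up, slot 1: second */
};

/* The low bit of the selector picks the slot, mirroring how
 * tipc_node_xmit() is described as resolving its selector argument. */
static struct link *select_link(struct node *n, unsigned int selector)
{
	return n->active_links[selector & 1];
}

int main(void)
{
	struct link first  = { "first established link" };
	struct link second = { "second established link" };
	struct node n = { { &first, &second } };

	/* Selector 0, as the patched name distributor uses -> slot 0. */
	printf("selector 0 -> %s\n", select_link(&n, 0)->name);
	/* An odd selector -> slot 1, the second established link. */
	printf("selector 1 -> %s\n", select_link(&n, 1)->name);
	return 0;
}

Before the patch, the calls changed below passed dnode as the selector, so whether the bulk update left on slot 0 or slot 1 depended on the destination address rather than on which link came up first.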
Diffstat (limited to 'net/tipc')
-rw-r--r--	net/tipc/name_distr.c	4
-rw-r--r--	net/tipc/node.c	2
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index e6018b7eb197..c07612bab95c 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -102,7 +102,7 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
 		if (!oskb)
 			break;
 		msg_set_destnode(buf_msg(oskb), dnode);
-		tipc_node_xmit_skb(net, oskb, dnode, dnode);
+		tipc_node_xmit_skb(net, oskb, dnode, 0);
 	}
 	rcu_read_unlock();
@@ -223,7 +223,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
 			 &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
 	rcu_read_unlock();
 
-	tipc_node_xmit(net, &head, dnode, dnode);
+	tipc_node_xmit(net, &head, dnode, 0);
 }
 
 static void tipc_publ_subscribe(struct net *net, struct publication *publ,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index eb739d20ed46..f4772f53f41a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -370,7 +370,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
 		tipc_link_set_active(ol, false);
 	} else if (nl->priority == ol->priority) {
 		tipc_link_set_active(nl, true);
-		*slot0 = bearer_id;
+		*slot1 = bearer_id;
 	} else {
 		pr_debug("New link <%s> is standby\n", nl->name);
 	}