path: root/net/batman-adv/bridge_loop_avoidance.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-23 09:44:31 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-23 09:44:31 +0300
commit	107df03203bb66de56e2caec3bde6d22b55480c5 (patch)
tree	cff42c091a4a9f43203bbb85c9cf526857470a8f /net/batman-adv/bridge_loop_avoidance.c
parent	88083e9845612826dfd44a5215647b4f6567317c (diff)
parent	f8e7718cc0445587fe8530fc2d240d9aac2c9072 (diff)
download	linux-107df03203bb66de56e2caec3bde6d22b55480c5.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix memory leak in nftables, from Liping Zhang.

 2) Need to check result of vlan_insert_tag() in batman-adv otherwise we risk NULL skb derefs, from Sven Eckelmann.

 3) Check for dev_alloc_skb() failures in cfg80211, from Gregory Greenman.

 4) Handle properly when we have ppp_unregister_channel() happening in parallel with ppp_connect_channel(), from WANG Cong.

 5) Fix DCCP deadlock, from Eric Dumazet.

 6) Bail out properly in UDP if sk_filter() truncates the packet to be smaller than even the space that the protocol headers need. From Michal Kubecek.

 7) Similarly for rose, dccp, and sctp, from Willem de Bruijn.

 8) Make TCP challenge ACKs less predictable, from Eric Dumazet.

 9) Fix infinite loop in bgmac_dma_tx_add() from Florian Fainelli.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (65 commits)
  packet: propagate sock_cmsg_send() error
  net/mlx5e: Fix del vxlan port command buffer memset
  packet: fix second argument of sock_tx_timestamp()
  net: switchdev: change ageing_time type to clock_t
  Update maintainer for EHEA driver.
  net/mlx4_en: Add resilience in low memory systems
  net/mlx4_en: Move filters cleanup to a proper location
  sctp: load transport header after sk_filter
  net/sched/sch_htb: clamp xstats tokens to fit into 32-bit int
  net: cavium: liquidio: Avoid dma_unmap_single on uninitialized ndata
  net: nb8800: Fix SKB leak in nb8800_receive()
  et131x: Fix logical vs bitwise check in et131x_tx_timeout()
  vlan: use a valid default mtu value for vlan over macsec
  net: bgmac: Fix infinite loop in bgmac_dma_tx_add()
  mlxsw: spectrum: Prevent invalid ingress buffer mapping
  mlxsw: spectrum: Prevent overwrite of DCB capability fields
  mlxsw: spectrum: Don't emit errors when PFC is disabled
  mlxsw: spectrum: Indicate support for autonegotiation
  mlxsw: spectrum: Force link training according to admin state
  r8152: add MODULE_VERSION
  ...
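
Item 2 above is the batman-adv fix shown in the diff further down: vlan_insert_tag() frees the skb and returns NULL on failure, so the result must be checked before the skb is used again. The following is a minimal sketch of that pattern; the wrapper function is hypothetical, while vlan_insert_tag(), VLAN_VID_MASK, ETH_P_8021Q and BATADV_VLAN_HAS_TAG are the identifiers used by the kernel code below.

#include <linux/if_vlan.h>

/* Hypothetical helper illustrating the fix: vlan_insert_tag() frees the
 * skb and returns NULL on failure, so the caller must not keep using the
 * old pointer.
 */
static struct sk_buff *example_insert_vlan_tag(struct sk_buff *skb,
					       unsigned short vid)
{
	if (vid & BATADV_VLAN_HAS_TAG) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
		if (!skb)
			return NULL;	/* skb already freed, bail out */
	}

	return skb;
}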
Diffstat (limited to 'net/batman-adv/bridge_loop_avoidance.c')
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 116
1 file changed, 92 insertions(+), 24 deletions(-)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 748a9ead7ce5..825a5cdf4382 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -177,10 +177,21 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
static void batadv_claim_release(struct kref *ref)
{
struct batadv_bla_claim *claim;
+ struct batadv_bla_backbone_gw *old_backbone_gw;
claim = container_of(ref, struct batadv_bla_claim, refcount);
- batadv_backbone_gw_put(claim->backbone_gw);
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
+ claim->backbone_gw = NULL;
+ spin_unlock_bh(&claim->backbone_lock);
+
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
kfree_rcu(claim, rcu);
}
@@ -418,9 +429,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
break;
}
- if (vid & BATADV_VLAN_HAS_TAG)
+ if (vid & BATADV_VLAN_HAS_TAG) {
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
+ if (!skb)
+ goto out;
+ }
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface);
@@ -674,8 +688,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid,
struct batadv_bla_backbone_gw *backbone_gw)
{
+ struct batadv_bla_backbone_gw *old_backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_bla_claim search_claim;
+ bool remove_crc = false;
int hash_added;
ether_addr_copy(search_claim.addr, mac);
@@ -689,8 +705,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
return;
ether_addr_copy(claim->addr, mac);
+ spin_lock_init(&claim->backbone_lock);
claim->vid = vid;
claim->lasttime = jiffies;
+ kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
kref_init(&claim->refcount);
@@ -718,15 +736,26 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
"bla_add_claim(): changing ownership for %pM, vid %d\n",
mac, BATADV_PRINT_VID(vid));
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
- batadv_backbone_gw_put(claim->backbone_gw);
+ remove_crc = true;
}
- /* set (new) backbone gw */
+
+ /* replace backbone_gw atomically and adjust reference counters */
+ spin_lock_bh(&claim->backbone_lock);
+ old_backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
+ spin_unlock_bh(&claim->backbone_lock);
+ if (remove_crc) {
+ /* remove claim address from old backbone_gw */
+ spin_lock_bh(&old_backbone_gw->crc_lock);
+ old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+ spin_unlock_bh(&old_backbone_gw->crc_lock);
+ }
+
+ batadv_backbone_gw_put(old_backbone_gw);
+
+ /* add claim address to new backbone_gw */
spin_lock_bh(&backbone_gw->crc_lock);
backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&backbone_gw->crc_lock);
@@ -737,6 +766,26 @@ claim_free_ref:
}
/**
+ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+ struct batadv_bla_backbone_gw *backbone_gw;
+
+ spin_lock_bh(&claim->backbone_lock);
+ backbone_gw = claim->backbone_gw;
+ kref_get(&backbone_gw->refcount);
+ spin_unlock_bh(&claim->backbone_lock);
+
+ return backbone_gw;
+}
+
+/**
* batadv_bla_del_claim - delete a claim from the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: mac address of the claim to be removed
@@ -760,10 +809,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_choose_claim, claim);
batadv_claim_put(claim); /* reference from the hash is gone */
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
-
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
@@ -1216,6 +1261,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
int now)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct hlist_head *head;
struct batadv_hashtable *hash;
@@ -1230,14 +1276,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
if (now)
goto purge_now;
- if (!batadv_compare_eth(claim->backbone_gw->orig,
+
+ if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
- continue;
+ goto skip;
+
if (!batadv_has_timed_out(claim->lasttime,
BATADV_BLA_CLAIM_TIMEOUT))
- continue;
+ goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"bla_purge_claims(): %pM, vid %d, time out\n",
@@ -1245,8 +1294,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
purge_now:
batadv_handle_unclaim(bat_priv, primary_if,
- claim->backbone_gw->orig,
+ backbone_gw->orig,
claim->addr, claim->vid);
+skip:
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
@@ -1757,9 +1808,11 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, bool is_bcast)
{
+ struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
+ bool own_claim;
bool ret;
ethhdr = eth_hdr(skb);
@@ -1794,8 +1847,12 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
/* if it is our own claim ... */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ own_claim = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (own_claim) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
@@ -1859,7 +1916,9 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
{
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
+ bool client_roamed;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1889,8 +1948,12 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
goto allow;
/* check if we are responsible. */
- if (batadv_compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+ client_roamed = batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr);
+ batadv_backbone_gw_put(backbone_gw);
+
+ if (client_roamed) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
@@ -1938,6 +2001,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
struct net_device *net_dev = (struct net_device *)seq->private;
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+ struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
@@ -1962,17 +2026,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
- is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+
+ is_own = batadv_compare_eth(backbone_gw->orig,
primary_addr);
- spin_lock_bh(&claim->backbone_gw->crc_lock);
- backbone_crc = claim->backbone_gw->crc;
- spin_unlock_bh(&claim->backbone_gw->crc_lock);
+ spin_lock_bh(&backbone_gw->crc_lock);
+ backbone_crc = backbone_gw->crc;
+ spin_unlock_bh(&backbone_gw->crc_lock);
seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
claim->addr, BATADV_PRINT_VID(claim->vid),
- claim->backbone_gw->orig,
+ backbone_gw->orig,
(is_own ? 'x' : ' '),
backbone_crc);
+
+ batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
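
The recurring change in this diff is that claim->backbone_gw may now be replaced while other CPUs still hold the claim, so readers take a counted reference under claim->backbone_lock via batadv_bla_claim_get_backbone_gw() instead of dereferencing the pointer directly. Below is a stripped-down sketch of that locking and refcounting pattern, using illustrative structure and function names (not batman-adv's) together with the real kref and spinlock primitives.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_gw {
	struct kref refcount;
};

struct example_claim {
	struct example_gw *gw;	/* may be swapped at any time */
	spinlock_t gw_lock;	/* protects @gw */
};

static void example_gw_release(struct kref *ref)
{
	kfree(container_of(ref, struct example_gw, refcount));
}

/* Take the reference while holding the lock, so the object cannot be
 * freed between reading the pointer and bumping its refcount, which is
 * the race the patch above closes for claim->backbone_gw.
 */
static struct example_gw *example_claim_get_gw(struct example_claim *claim)
{
	struct example_gw *gw;

	spin_lock_bh(&claim->gw_lock);
	gw = claim->gw;
	kref_get(&gw->refcount);
	spin_unlock_bh(&claim->gw_lock);

	return gw;
}

static void example_claim_use(struct example_claim *claim)
{
	struct example_gw *gw = example_claim_get_gw(claim);

	/* ... safely use gw here ... */

	kref_put(&gw->refcount, example_gw_release);
}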