Diffstat (limited to 'net/batman-adv/fragmentation.c')
-rw-r--r--	net/batman-adv/fragmentation.c	82
1 file changed, 49 insertions, 33 deletions
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 0934730fb7ff..9c561e683f4b 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -20,6 +20,7 @@
 
 #include <linux/atomic.h>
 #include <linux/byteorder/generic.h>
+#include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/fs.h>
 #include <linux/if_ether.h>
@@ -42,17 +43,23 @@
 /**
  * batadv_frag_clear_chain - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
+ * @dropped: whether the chain is cleared because all fragments are dropped
  *
  * Free fragments in the passed hlist. Should be called with appropriate lock.
  */
-static void batadv_frag_clear_chain(struct hlist_head *head)
+static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
 {
 	struct batadv_frag_list_entry *entry;
 	struct hlist_node *node;
 
 	hlist_for_each_entry_safe(entry, node, head, list) {
 		hlist_del(&entry->list);
-		kfree_skb(entry->skb);
+
+		if (dropped)
+			kfree_skb(entry->skb);
+		else
+			consume_skb(entry->skb);
+
 		kfree(entry);
 	}
 }
@@ -73,7 +80,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
 		spin_lock_bh(&chain->lock);
 
 		if (!check_cb || check_cb(chain)) {
-			batadv_frag_clear_chain(&chain->head);
+			batadv_frag_clear_chain(&chain->fragment_list, true);
 			chain->size = 0;
 		}
 
@@ -117,8 +124,8 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
 	if (chain->seqno == seqno)
 		return false;
 
-	if (!hlist_empty(&chain->head))
-		batadv_frag_clear_chain(&chain->head);
+	if (!hlist_empty(&chain->fragment_list))
+		batadv_frag_clear_chain(&chain->fragment_list, true);
 
 	chain->size = 0;
 	chain->seqno = seqno;
@@ -176,7 +183,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 	chain = &orig_node->fragments[bucket];
 	spin_lock_bh(&chain->lock);
 	if (batadv_frag_init_chain(chain, seqno)) {
-		hlist_add_head(&frag_entry_new->list, &chain->head);
+		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
 		chain->size = skb->len - hdr_size;
 		chain->timestamp = jiffies;
 		chain->total_size = ntohs(frag_packet->total_size);
@@ -185,7 +192,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 	}
 
 	/* Find the position for the new fragment. */
-	hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
+	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
 		/* Drop packet if fragment already exists. */
 		if (frag_entry_curr->no == frag_entry_new->no)
 			goto err_unlock;
@@ -220,11 +227,11 @@ out:
 		 * exceeds the maximum size of one merged packet. Don't allow
 		 * packets to have different total_size.
 		 */
-		batadv_frag_clear_chain(&chain->head);
+		batadv_frag_clear_chain(&chain->fragment_list, true);
 		chain->size = 0;
 	} else if (ntohs(frag_packet->total_size) == chain->size) {
 		/* All fragments received. Hand over chain to caller. */
-		hlist_move_list(&chain->head, chain_out);
+		hlist_move_list(&chain->fragment_list, chain_out);
 		chain->size = 0;
 	}
 
@@ -252,8 +259,9 @@ batadv_frag_merge_packets(struct hlist_head *chain)
 {
 	struct batadv_frag_packet *packet;
 	struct batadv_frag_list_entry *entry;
-	struct sk_buff *skb_out = NULL;
+	struct sk_buff *skb_out;
 	int size, hdr_size = sizeof(struct batadv_frag_packet);
+	bool dropped = false;
 
 	/* Remove first entry, as this is the destination for the rest of the
 	 * fragments.
@@ -270,6 +278,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
 	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
 		kfree_skb(skb_out);
 		skb_out = NULL;
+		dropped = true;
 		goto free;
 	}
 
@@ -291,7 +300,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
 
 free:
 	/* Locking is not needed, because 'chain' is not part of any orig. */
-	batadv_frag_clear_chain(chain);
+	batadv_frag_clear_chain(chain, dropped);
 
 	return skb_out;
 }
@@ -352,7 +361,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
 			 struct batadv_orig_node *orig_node_src)
 {
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct batadv_orig_node *orig_node_dst = NULL;
+	struct batadv_orig_node *orig_node_dst;
 	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_frag_packet *packet;
 	u16 total_size;
@@ -433,8 +442,7 @@ err:
  * @orig_node: final destination of the created fragments
  * @neigh_node: next-hop of the created fragments
  *
- * Return: the netdev tx status or -1 in case of error.
- * When -1 is returned the skb is not consumed.
+ * Return: the netdev tx status or a negative errno code on a failure
  */
 int batadv_frag_send_packet(struct sk_buff *skb,
 			    struct batadv_orig_node *orig_node,
@@ -447,7 +455,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
 	unsigned int header_size = sizeof(frag_header);
 	unsigned int max_fragment_size, max_packet_size;
-	int ret = -1;
+	int ret;
 
 	/* To avoid merge and refragmentation at next-hops we never send
 	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -457,13 +465,17 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
 
 	/* Don't even try to fragment, if we need more than 16 fragments */
-	if (skb->len > max_packet_size)
-		goto out;
+	if (skb->len > max_packet_size) {
+		ret = -EAGAIN;
+		goto free_skb;
+	}
 
 	bat_priv = orig_node->bat_priv;
 	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
+	if (!primary_if) {
+		ret = -EINVAL;
+		goto put_primary_if;
+	}
 
 	/* Create one header to be copied to all fragments */
 	frag_header.packet_type = BATADV_UNICAST_FRAG;
@@ -488,34 +500,35 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	/* Eat and send fragments from the tail of skb */
 	while (skb->len > max_fragment_size) {
 		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
-		if (!skb_fragment)
-			goto out;
+		if (!skb_fragment) {
+			ret = -ENOMEM;
+			goto free_skb;
+		}
 
 		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
 		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
 				   skb_fragment->len + ETH_HLEN);
 		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
 		if (ret != NET_XMIT_SUCCESS) {
-			/* return -1 so that the caller can free the original
-			 * skb
-			 */
-			ret = -1;
-			goto out;
+			ret = NET_XMIT_DROP;
+			goto free_skb;
 		}
 
 		frag_header.no++;
 
 		/* The initial check in this function should cover this case */
 		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
-			ret = -1;
-			goto out;
+			ret = -EINVAL;
+			goto free_skb;
 		}
 	}
 
 	/* Make room for the fragment header. */
 	if (batadv_skb_head_push(skb, header_size) < 0 ||
-	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
-		goto out;
+	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
+		ret = -ENOMEM;
+		goto free_skb;
+	}
 
 	memcpy(skb->data, &frag_header, header_size);
 
@@ -524,10 +537,13 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
 			   skb->len + ETH_HLEN);
 	ret = batadv_send_unicast_skb(skb, neigh_node);
+	/* skb was consumed */
+	skb = NULL;
 
-out:
-	if (primary_if)
-		batadv_hardif_put(primary_if);
+put_primary_if:
+	batadv_hardif_put(primary_if);
+free_skb:
+	kfree_skb(skb);
 
 	return ret;
 }
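For illustration only, not part of the patch: the hunks above make batadv_frag_send_packet() consume the skb on every path and report either a netdev tx status or a negative errno. A minimal caller sketch under that assumption follows; the wrapper name example_frag_send() is hypothetical.

/* Hypothetical caller sketch, not from this patch: with the reworked
 * error handling, batadv_frag_send_packet() frees or hands off the skb
 * on every path, so the caller must not kfree_skb() it again.
 */
static int example_frag_send(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_neigh_node *neigh_node)
{
	int ret;

	ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
	if (ret < 0)
		return ret;	/* negative errno; skb already freed */

	/* otherwise ret is a netdev tx status such as NET_XMIT_SUCCESS */
	return 0;
}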