author | Marek Lindner <lindner_marek@yahoo.de> | 2010-05-22 19:48:44 +0400
---|---|---
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2010-06-05 00:38:55 +0400
commit | 5f411a90ee163801434775264b4f9932f1de9e4c (patch) |
tree | 3837fcba82e6c9d6786330240d6b9385fac31650 /drivers |
parent | 9d20015391dfc47f6371492925cc0333ac403414 (diff) |
download | linux-5f411a90ee163801434775264b4f9932f1de9e4c.tar.xz |
Staging: batman-adv: fix rogue packets on shutdown
On module shutdown batman-adv would purge the internal packet
queue by sending all remaining packets, which could confuse
other nodes. Now the packets are silently discarded instead.
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/staging/batman-adv/send.c | 24 |
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index d8536e277a26..ac69ed871a76 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -440,6 +440,9 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 	hlist_del(&forw_packet->list);
 	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
+	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+		goto out;
+
 	/* rebroadcast packet */
 	rcu_read_lock();
 	list_for_each_entry_rcu(batman_if, &if_list, list) {
@@ -453,15 +456,15 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 
 	forw_packet->num_packets++;
 
-	/* if we still have some more bcasts to send and we are not shutting
-	 * down */
-	if ((forw_packet->num_packets < 3) &&
-	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
+	/* if we still have some more bcasts to send */
+	if (forw_packet->num_packets < 3) {
 		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
-	else {
-		forw_packet_free(forw_packet);
-		atomic_inc(&bcast_queue_left);
+		return;
 	}
+
+out:
+	forw_packet_free(forw_packet);
+	atomic_inc(&bcast_queue_left);
 }
 
 void send_outstanding_bat_packet(struct work_struct *work)
@@ -476,6 +479,9 @@ void send_outstanding_bat_packet(struct work_struct *work)
 	hlist_del(&forw_packet->list);
 	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
+	if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+		goto out;
+
 	send_packet(forw_packet);
 
 	/**
@@ -483,10 +489,10 @@ void send_outstanding_bat_packet(struct work_struct *work)
 	 * to determine the queues wake up time unless we are
 	 * shutting down */
-	if ((forw_packet->own) &&
-	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
+	if (forw_packet->own)
 		schedule_own_packet(forw_packet->if_incoming);
 
+out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&batman_queue_left);
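For illustration, here is a minimal standalone sketch of the control flow this patch introduces: the deferred "send" handler checks the shutdown flag first and jumps to a single cleanup label, so a queued packet is freed without ever being transmitted. Names such as module_state, forw_packet and send_packet mirror the kernel code above, but the sketch (plain C11 atomics instead of the kernel's atomic_t, a stub send_packet, heap allocation in place of the work queue) is an assumed userspace analogue, not the actual batman-adv implementation.

```c
/*
 * Standalone sketch (not kernel code): a deferred send handler that
 * checks a shared shutdown flag first and falls through to a single
 * cleanup label, mirroring the goto-out pattern added by this patch.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

enum module_state_t { MODULE_ACTIVE, MODULE_DEACTIVATING };

static atomic_int module_state = MODULE_ACTIVE;

struct forw_packet {
	int num_packets;	/* how many times this broadcast went out */
};

static void send_packet(struct forw_packet *p)
{
	printf("transmitting, attempt %d\n", p->num_packets + 1);
}

static void forw_packet_free(struct forw_packet *p)
{
	free(p);
}

/* One invocation of the queued work: drop silently on shutdown,
 * otherwise transmit and keep the packet until it was sent 3 times. */
static void send_outstanding_packet(struct forw_packet *p)
{
	/* silently discard the queued packet while shutting down */
	if (atomic_load(&module_state) == MODULE_DEACTIVATING)
		goto out;

	send_packet(p);
	p->num_packets++;

	/* still more rebroadcasts to do: keep the packet queued */
	if (p->num_packets < 3)
		return;		/* a real caller would re-arm the delayed work */

out:
	forw_packet_free(p);	/* single cleanup path for both cases */
}

int main(void)
{
	struct forw_packet *p = calloc(1, sizeof(*p));

	/* normal operation: rebroadcast three times, then free */
	for (int i = 0; i < 3; i++)
		send_outstanding_packet(p);

	/* shutdown: a freshly queued packet is discarded without a send */
	atomic_store(&module_state, MODULE_DEACTIVATING);
	p = calloc(1, sizeof(*p));
	send_outstanding_packet(p);

	return 0;
}
```

Before the patch the shutdown state was only consulted when deciding whether to requeue, so the last queued packets were still sent while the module was going down; moving the check to the top of each handler means a deactivating module never transmits from its queue, which is the "rogue packets" behaviour the commit message describes.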