| author | Stephen Hemminger <stephen@networkplumber.org> | 2018-03-21 01:03:05 +0300 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2018-03-22 19:45:09 +0300 |
| commit | 7b2ee50c0cd513a176a26a71f2989facdd75bfea (patch) | |
| tree | 3056b5d027d819b98ce01e26128b3617b3a23e58 /drivers/net/hyperv/netvsc.c | |
| parent | 0ef58b0a05c127762f975c3dfe8b922e4aa87a29 (diff) | |
| download | linux-7b2ee50c0cd513a176a26a71f2989facdd75bfea.tar.xz | |
hv_netvsc: common detach logic
Add a common function for detaching the internals of the device
during MTU and RSS changes. Make sure no more packets are transmitted
and all outstanding packets have been received before the device is
torn down.
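
The ordering that matters can be illustrated with a short sketch. This is not the code added by the patch: the example_* helpers are placeholders, and the netvsc types are assumed to come from the driver's local hyperv_net.h.

```c
/*
 * Illustrative sketch only, not the patch itself.  The example_*
 * helpers are placeholders; the netvsc types come from the driver's
 * local hyperv_net.h.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include "hyperv_net.h"

void example_wait_until_empty(struct netvsc_device *nvdev);
void example_teardown_channels(struct net_device *ndev,
			       struct netvsc_device *nvdev);

/* Quiesce and tear down the lower device; caller holds RTNL. */
void example_detach(struct net_device *ndev, struct netvsc_device *nvdev)
{
	ASSERT_RTNL();

	/* Stop the stack from queuing any more transmits. */
	if (netif_running(ndev))
		netif_tx_disable(ndev);

	/* Let every in-flight send and receive completion finish. */
	example_wait_until_empty(nvdev);

	/* Only then is it safe to revoke buffers and close the channel. */
	example_teardown_channels(ndev, nvdev);
}
```
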
Change the wait logic to be common and use usleep_range().
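
As a rough illustration of such a wait loop, the sketch below polls the per-channel send counters and receive-completion ring and sleeps with usleep_range() between passes. The field names (num_chn, chan_table, mrc, queue_sends) follow hyperv_net.h, but this is not the literal helper introduced by the patch.

```c
/*
 * Sketch of a drain-wait loop; not the literal helper added by
 * the patch.
 */
#include <linux/atomic.h>
#include <linux/delay.h>
#include "hyperv_net.h"

void example_wait_until_empty(struct netvsc_device *nvdev)
{
	int i;

	for (;;) {
		for (i = 0; i < nvdev->num_chn; i++) {
			const struct netvsc_channel *nvchan
				= &nvdev->chan_table[i];

			/* Receive completions still queued on this channel? */
			if (nvchan->mrc.first != nvchan->mrc.next)
				break;

			/* Sends still outstanding on this channel? */
			if (atomic_read(&nvchan->queue_sends) > 0)
				break;
		}

		/* Every channel is idle: done. */
		if (i == nvdev->num_chn)
			return;

		/* Sleep rather than busy-wait; the range lets timers coalesce. */
		usleep_range(1000, 2000);
	}
}
```
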
Change the transmit enabling logic so that transmit queues are
disabled while the lower device is being changed, and re-enabled only
after the subchannels are set up. This closes the window in which a
packet could be sent while a subchannel was not yet initialized.
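
A hedged sketch of that gating, using placeholder example_* helpers rather than the driver's real device-creation and subchannel-setup paths; only the ordering is the point.

```c
/*
 * Sketch of the transmit gating described above.  The example_*
 * helpers are placeholders, not driver functions.
 */
#include <linux/err.h>
#include <linux/netdevice.h>
#include "hyperv_net.h"

struct netvsc_device *example_create_lower_device(struct net_device *ndev,
						  struct netvsc_device_info *info);
int example_setup_subchannels(struct net_device *ndev,
			      struct netvsc_device *nvdev);

int example_attach(struct net_device *ndev, struct netvsc_device_info *info)
{
	struct netvsc_device *nvdev;
	int ret;

	/* Transmit queues are still disabled while the device is rebuilt. */
	nvdev = example_create_lower_device(ndev, info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		/* Open every subchannel before any packet may use it. */
		ret = example_setup_subchannels(ndev, nvdev);
		if (ret)
			return ret;
	}

	/* Only now let the stack queue packets again. */
	if (netif_running(ndev))
		netif_tx_wake_all_queues(ndev);

	return 0;
}
```
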
Fixes: 8195b1396ec8 ("hv_netvsc: fix deadlock on hotplug")
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/hyperv/netvsc.c')
-rw-r--r-- | drivers/net/hyperv/netvsc.c | 20 |
1 file changed, 11 insertions, 9 deletions
```diff
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 37b0a30d6b03..7472172823f3 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -555,8 +555,6 @@ void netvsc_device_remove(struct hv_device *device)
 		= rtnl_dereference(net_device_ctx->nvdev);
 	int i;
 
-	cancel_work_sync(&net_device->subchan_work);
-
 	netvsc_revoke_buf(device, net_device);
 
 	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
@@ -643,14 +641,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 	queue_sends =
 		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-	if (net_device->destroy && queue_sends == 0)
-		wake_up(&net_device->wait_drain);
+	if (unlikely(net_device->destroy)) {
+		if (queue_sends == 0)
+			wake_up(&net_device->wait_drain);
+	} else {
+		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
-	     queue_sends < 1)) {
-		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
-		ndev_ctx->eth_stats.wake_queue++;
+		if (netif_tx_queue_stopped(txq) &&
+		    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+		     queue_sends < 1)) {
+			netif_tx_wake_queue(txq);
+			ndev_ctx->eth_stats.wake_queue++;
+		}
 	}
 }
 
```
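
For context, a hypothetical caller combining the detach and attach steps around an MTU change might look like the sketch below. example_detach() and example_attach() are the placeholder helpers from the earlier sketches, not functions added by the patch, and error handling is reduced to the essentials.

```c
/*
 * Hypothetical MTU-change path built on the placeholder helpers
 * above; not the driver's actual netvsc_change_mtu().
 */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include "hyperv_net.h"

void example_detach(struct net_device *ndev, struct netvsc_device *nvdev);
int example_attach(struct net_device *ndev, struct netvsc_device_info *info);

int example_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info device_info;
	int ret;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	/* Remember the current channel count for re-creation. */
	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;

	/* Quiesce and tear down the old lower device first. */
	example_detach(ndev, nvdev);

	ndev->mtu = mtu;

	/* Rebuild the device; transmit is re-enabled inside attach. */
	ret = example_attach(ndev, &device_info);
	if (ret)
		netdev_err(ndev, "attach failed after MTU change: %d\n", ret);

	return ret;
}
```
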