author     Vadim Lomovtsev <vlomovtsev@marvell.com>   2019-02-20 14:02:43 +0300
committer  David S. Miller <davem@davemloft.net>      2019-02-22 22:43:44 +0300
commit     2ecbe4f4a027890a5d74a5100075aa6a373bea2c (patch)
tree       bef3411a1e1ffb00811d7c498f0090286b80a59d /drivers/net/ethernet/cavium/thunder
parent     f6d25aca1ba3f46b76dabf6023a0dc2062dc792e (diff)
download   linux-2ecbe4f4a027890a5d74a5100075aa6a373bea2c.tar.xz
net: thunderx: replace global nicvf_rx_mode_wq work queue for all VFs to private for each of them.
Having one work queue shared by all VFs for the receive-mode configuration done
in ndo_set_rx_mode() means each VF has to wait for another VF's set_rx_mode()
task to finish whenever a close, set-receive-mode or change-flags call has
already been invoked. The device state can then change before the corresponding
receive-mode configuration completes, making the call itself meaningless,
corrupting data or breaking the configuration sequence.
The NIC VF configuration sequence does not need any delay, so scheduling
delayed work with a delay of 0 makes no sense.
This commit introduces one work queue per NIC VF for the set_rx_mode task, so
the VFs can proceed independently, and replaces delayed_work with work_struct.
Signed-off-by: Vadim Lomovtsev <vlomovtsev@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
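
Editor's note: for readers less familiar with the workqueue API, the per-VF pattern the patch adopts looks roughly like the minimal sketch below. It is illustrative only; the names (my_vf, my_rx_mode_task, my_vf_probe, and so on) are invented here and do not appear in the driver.

#include <linux/workqueue.h>
#include <linux/printk.h>
#include <linux/errno.h>

struct my_vf {
        int vf_id;
        struct workqueue_struct *rx_mode_wq;    /* one ordered queue per VF */
        struct work_struct rx_mode_work;        /* plain work item, no delay needed */
};

/* Runs on this VF's private queue; other VFs are unaffected. */
static void my_rx_mode_task(struct work_struct *work)
{
        struct my_vf *vf = container_of(work, struct my_vf, rx_mode_work);

        pr_info("VF%d: applying rx mode\n", vf->vf_id);
}

static int my_vf_probe(struct my_vf *vf)
{
        /* ordered queue: at most one rx-mode task runs at a time, in order */
        vf->rx_mode_wq = alloc_ordered_workqueue("my_rx_mode_wq_VF%d",
                                                 WQ_MEM_RECLAIM, vf->vf_id);
        if (!vf->rx_mode_wq)
                return -ENOMEM;
        INIT_WORK(&vf->rx_mode_work, my_rx_mode_task);
        return 0;
}

static void my_vf_set_rx_mode(struct my_vf *vf)
{
        /* queue_work() replaces queue_delayed_work(..., 0) */
        queue_work(vf->rx_mode_wq, &vf->rx_mode_work);
}

static void my_vf_stop(struct my_vf *vf)
{
        /* wait until every queued rx-mode task has finished */
        drain_workqueue(vf->rx_mode_wq);
}

static void my_vf_remove(struct my_vf *vf)
{
        destroy_workqueue(vf->rx_mode_wq);
        vf->rx_mode_wq = NULL;
}

Because each VF owns its queue, draining or destroying it only waits on that VF's pending work instead of serializing all VFs behind one another.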
Diffstat (limited to 'drivers/net/ethernet/cavium/thunder')
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h        |  4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 30
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index f4d81765221e..376a96bce33f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -271,7 +271,7 @@ struct xcast_addr_list {
 };
 
 struct nicvf_work {
-        struct delayed_work    work;
+        struct work_struct     work;
         u8                     mode;
         struct xcast_addr_list *mc;
 };
@@ -327,6 +327,8 @@ struct nicvf {
         struct nicvf_work       rx_mode_work;
         /* spinlock to protect workqueue arguments from concurrent access */
         spinlock_t              rx_mode_wq_lock;
+        /* workqueue for handling kernel ndo_set_rx_mode() calls */
+        struct workqueue_struct *nicvf_rx_mode_wq;
 
         /* PTP timestamp */
         struct cavium_ptp       *ptp_clock;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..abf24e7dff2d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
 MODULE_PARM_DESC(cpi_alg,
                  "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
-static struct workqueue_struct *nicvf_rx_mode_wq;
-
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
         if (nic->sqs_mode)
@@ -1311,6 +1308,9 @@ int nicvf_stop(struct net_device *netdev)
         struct nicvf_cq_poll *cq_poll = NULL;
         union nic_mbx mbx = {};
 
+        /* wait till all queued set_rx_mode tasks completes */
+        drain_workqueue(nic->nicvf_rx_mode_wq);
+
         mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
         nicvf_send_msg_to_pf(nic, &mbx);
 
@@ -1418,6 +1418,9 @@ int nicvf_open(struct net_device *netdev)
         struct nicvf_cq_poll *cq_poll = NULL;
         union nic_mbx mbx = {};
 
+        /* wait till all queued set_rx_mode tasks completes if any */
+        drain_workqueue(nic->nicvf_rx_mode_wq);
+
         netif_carrier_off(netdev);
 
         err = nicvf_register_misc_interrupt(nic);
@@ -1973,7 +1976,7 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
 static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 {
         struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
-                                                  work.work);
+                                                  work);
         struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
         u8 mode;
         struct xcast_addr_list *mc;
@@ -2030,7 +2033,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
         kfree(nic->rx_mode_work.mc);
         nic->rx_mode_work.mc = mc_list;
         nic->rx_mode_work.mode = mode;
-        queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+        queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
         spin_unlock(&nic->rx_mode_wq_lock);
 }
 
@@ -2187,7 +2190,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         INIT_WORK(&nic->reset_task, nicvf_reset_task);
 
-        INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+        nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+                                                        WQ_MEM_RECLAIM,
+                                                        nic->vf_id);
+        INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
         spin_lock_init(&nic->rx_mode_wq_lock);
 
         err = register_netdev(netdev);
@@ -2228,13 +2234,15 @@ static void nicvf_remove(struct pci_dev *pdev)
         nic = netdev_priv(netdev);
         pnetdev = nic->pnicvf->netdev;
 
-        cancel_delayed_work_sync(&nic->rx_mode_work.work);
-
         /* Check if this Qset is assigned to different VF.
          * If yes, clean primary and all secondary Qsets.
          */
         if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
                 unregister_netdev(pnetdev);
+        if (nic->nicvf_rx_mode_wq) {
+                destroy_workqueue(nic->nicvf_rx_mode_wq);
+                nic->nicvf_rx_mode_wq = NULL;
+        }
         nicvf_unregister_interrupts(nic);
         pci_set_drvdata(pdev, NULL);
         if (nic->drv_stats)
@@ -2261,17 +2269,11 @@ static struct pci_driver nicvf_driver = {
 static int __init nicvf_init_module(void)
 {
         pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
 
-        nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
-                                                   WQ_MEM_RECLAIM);
         return pci_register_driver(&nicvf_driver);
 }
 
 static void __exit nicvf_cleanup_module(void)
 {
-        if (nicvf_rx_mode_wq) {
-                destroy_workqueue(nicvf_rx_mode_wq);
-                nicvf_rx_mode_wq = NULL;
-        }
         pci_unregister_driver(&nicvf_driver);
 }
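
Editor's note: a related detail visible in the hunks above is the rx_mode_wq_lock spinlock, which hands the multicast list and mode from the ndo_set_rx_mode() path to the work handler. Below is a hedged sketch of that handoff pattern under the same assumptions as before; demo_nic, demo_args and the demo_* functions are invented names, not driver symbols.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_args {
        int n_addrs;                    /* stands in for the multicast list */
};

struct demo_nic {
        spinlock_t lock;                /* protects the args/mode handoff */
        struct demo_args *args;         /* owned by whoever holds the lock */
        u8 mode;
        struct work_struct work;
        struct workqueue_struct *wq;    /* this device's private ordered queue */
};

/* Work handler: take the pending arguments under the lock, then do the
 * (potentially sleeping) hardware configuration outside of it.
 */
static void demo_rx_mode_task(struct work_struct *w)
{
        struct demo_nic *nic = container_of(w, struct demo_nic, work);
        struct demo_args *args;
        u8 mode;

        spin_lock(&nic->lock);
        args = nic->args;
        nic->args = NULL;
        mode = nic->mode;
        spin_unlock(&nic->lock);

        /* ... program the hardware with mode/args here ... */
        kfree(args);
}

/* ndo_set_rx_mode() path: replace any arguments the handler has not yet
 * consumed and (re)queue the work on this device's private queue.
 */
static void demo_set_rx_mode(struct demo_nic *nic, struct demo_args *new_args,
                             u8 mode)
{
        spin_lock(&nic->lock);
        kfree(nic->args);               /* drop a stale, unconsumed list */
        nic->args = new_args;
        nic->mode = mode;
        queue_work(nic->wq, &nic->work);
        spin_unlock(&nic->lock);
}

Because the queue is ordered and queue_work() is a no-op while the work item is still pending, the handler always ends up applying the most recently stored arguments for that device.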