Diffstat (limited to 'drivers/net/ethernet/cisco/enic/enic_main.c')
-rw-r--r-- | drivers/net/ethernet/cisco/enic/enic_main.c | 247
1 file changed, 191 insertions, 56 deletions
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f32f828b7f3d..c8832bc1c5f7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -39,6 +39,12 @@
 #include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
 #include <linux/ktime.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -49,6 +55,7 @@
 #include "enic.h"
 #include "enic_dev.h"
 #include "enic_pp.h"
+#include "enic_clsf.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
@@ -60,7 +67,7 @@
 #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF	0x0071	/* enet SRIOV VF */
 
 /* Supported devices */
-static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
+static const struct pci_device_id enic_id_table[] = {
 	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
 	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
 	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
@@ -309,40 +316,15 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t enic_isr_msix_rq(int irq, void *data)
+static irqreturn_t enic_isr_msix(int irq, void *data)
 {
 	struct napi_struct *napi = data;
 
-	/* schedule NAPI polling for RQ cleanup */
 	napi_schedule(napi);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t enic_isr_msix_wq(int irq, void *data)
-{
-	struct enic *enic = data;
-	unsigned int cq;
-	unsigned int intr;
-	unsigned int wq_work_to_do = -1; /* no limit */
-	unsigned int wq_work_done;
-	unsigned int wq_irq;
-
-	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
-	cq = enic_cq_wq(enic, wq_irq);
-	intr = enic_msix_wq_intr(enic, wq_irq);
-
-	wq_work_done = vnic_cq_service(&enic->cq[cq],
-		wq_work_to_do, enic_wq_service, NULL);
-
-	vnic_intr_return_credits(&enic->intr[intr],
-		wq_work_done,
-		1 /* unmask intr */,
-		1 /* reset intr timer */);
-
-	return IRQ_HANDLED;
-}
-
 static irqreturn_t enic_isr_msix_err(int irq, void *data)
 {
 	struct enic *enic = data;
@@ -1049,10 +1031,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		if (vlan_stripped)
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 
-		if (netdev->features & NETIF_F_GRO)
-			napi_gro_receive(&enic->napi[q_number], skb);
-		else
+		skb_mark_napi_id(skb, &enic->napi[rq->index]);
+		if (enic_poll_busy_polling(rq) ||
+		    !(netdev->features & NETIF_F_GRO))
 			netif_receive_skb(skb);
+		else
+			napi_gro_receive(&enic->napi[q_number], skb);
 		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
 			enic_intr_update_pkt_size(&cq->pkt_size_counter,
 						  bytes_written);
@@ -1089,16 +1073,22 @@ static int enic_poll(struct napi_struct *napi, int budget)
 	unsigned int work_done, rq_work_done = 0, wq_work_done;
 	int err;
 
-	/* Service RQ (first) and WQ
-	 */
+	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
+				       enic_wq_service, NULL);
+
+	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
+		if (wq_work_done > 0)
+			vnic_intr_return_credits(&enic->intr[intr],
+						 wq_work_done,
+						 0 /* dont unmask intr */,
+						 0 /* dont reset intr timer */);
+		return rq_work_done;
+	}
 
 	if (budget > 0)
 		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
 			rq_work_to_do, enic_rq_service, NULL);
 
-	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
-		wq_work_to_do, enic_wq_service, NULL);
-
 	/* Accumulate intr event credits for this polling
 	 * cycle. An intr event is the completion of a
 	 * a WQ or RQ packet.
@@ -1130,6 +1120,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		vnic_intr_unmask(&enic->intr[intr]);
 	}
+	enic_poll_unlock_napi(&enic->rq[cq_rq]);
 
 	return rq_work_done;
 }
@@ -1192,7 +1183,102 @@ static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
 	pkt_size_counter->small_pkt_bytes_cnt = 0;
 }
 
-static int enic_poll_msix(struct napi_struct *napi, int budget)
+#ifdef CONFIG_RFS_ACCEL
+static void enic_free_rx_cpu_rmap(struct enic *enic)
+{
+	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
+	enic->netdev->rx_cpu_rmap = NULL;
+}
+
+static void enic_set_rx_cpu_rmap(struct enic *enic)
+{
+	int i, res;
+
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
+		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
+		if (unlikely(!enic->netdev->rx_cpu_rmap))
+			return;
+		for (i = 0; i < enic->rq_count; i++) {
+			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
+					       enic->msix_entry[i].vector);
+			if (unlikely(res)) {
+				enic_free_rx_cpu_rmap(enic);
+				return;
+			}
+		}
+	}
+}
+
+#else
+
+static void enic_free_rx_cpu_rmap(struct enic *enic)
+{
+}
+
+static void enic_set_rx_cpu_rmap(struct enic *enic)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int enic_busy_poll(struct napi_struct *napi)
+{
+	struct net_device *netdev = napi->dev;
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int rq = (napi - &enic->napi[0]);
+	unsigned int cq = enic_cq_rq(enic, rq);
+	unsigned int intr = enic_msix_rq_intr(enic, rq);
+	unsigned int work_to_do = -1; /* clean all pkts possible */
+	unsigned int work_done;
+
+	if (!enic_poll_lock_poll(&enic->rq[rq]))
+		return LL_FLUSH_BUSY;
+	work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
+				    enic_rq_service, NULL);
+
+	if (work_done > 0)
+		vnic_intr_return_credits(&enic->intr[intr],
+					 work_done, 0, 0);
+	vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+		enic_calc_int_moderation(enic, &enic->rq[rq]);
+	enic_poll_unlock_poll(&enic->rq[rq]);
+
+	return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
+{
+	struct net_device *netdev = napi->dev;
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
+	struct vnic_wq *wq = &enic->wq[wq_index];
+	unsigned int cq;
+	unsigned int intr;
+	unsigned int wq_work_to_do = -1; /* clean all desc possible */
+	unsigned int wq_work_done;
+	unsigned int wq_irq;
+
+	wq_irq = wq->index;
+	cq = enic_cq_wq(enic, wq_irq);
+	intr = enic_msix_wq_intr(enic, wq_irq);
+	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
+				       enic_wq_service, NULL);
+
+	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
+				 0 /* don't unmask intr */,
+				 1 /* reset intr timer */);
+	if (!wq_work_done) {
+		napi_complete(napi);
+		vnic_intr_unmask(&enic->intr[intr]);
+	}
+
+	return 0;
+}
+
+static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 {
 	struct net_device *netdev = napi->dev;
 	struct enic *enic = netdev_priv(netdev);
@@ -1203,6 +1289,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 	unsigned int work_done = 0;
 	int err;
 
+	if (!enic_poll_lock_napi(&enic->rq[rq]))
+		return work_done;
 	/* Service RQ
 	 */
 
@@ -1248,6 +1336,7 @@
 			enic_set_int_moderation(enic, &enic->rq[rq]);
 		vnic_intr_unmask(&enic->intr[intr]);
 	}
+	enic_poll_unlock_napi(&enic->rq[rq]);
 
 	return work_done;
 }
@@ -1267,6 +1356,7 @@ static void enic_free_intr(struct enic *enic)
 	struct net_device *netdev = enic->netdev;
 	unsigned int i;
 
+	enic_free_rx_cpu_rmap(enic);
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	case VNIC_DEV_INTR_MODE_INTX:
 		free_irq(enic->pdev->irq, netdev);
@@ -1291,6 +1381,7 @@ static int enic_request_intr(struct enic *enic)
 	unsigned int i, intr;
 	int err = 0;
 
+	enic_set_rx_cpu_rmap(enic);
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 
 	case VNIC_DEV_INTR_MODE_INTX:
@@ -1312,17 +1403,19 @@ static int enic_request_intr(struct enic *enic)
 			snprintf(enic->msix[intr].devname,
 				sizeof(enic->msix[intr].devname),
 				"%.11s-rx-%d", netdev->name, i);
-			enic->msix[intr].isr = enic_isr_msix_rq;
+			enic->msix[intr].isr = enic_isr_msix;
 			enic->msix[intr].devid = &enic->napi[i];
 		}
 
 		for (i = 0; i < enic->wq_count; i++) {
+			int wq = enic_cq_wq(enic, i);
+
 			intr = enic_msix_wq_intr(enic, i);
 			snprintf(enic->msix[intr].devname,
 				sizeof(enic->msix[intr].devname),
 				"%.11s-tx-%d", netdev->name, i);
-			enic->msix[intr].isr = enic_isr_msix_wq;
-			enic->msix[intr].devid = enic;
+			enic->msix[intr].isr = enic_isr_msix;
+			enic->msix[intr].devid = &enic->napi[wq];
 		}
 
 		intr = enic_msix_err_intr(enic);
@@ -1421,7 +1514,7 @@ static int enic_dev_notify_set(struct enic *enic)
 {
 	int err;
 
-	spin_lock(&enic->devcmd_lock);
+	spin_lock_bh(&enic->devcmd_lock);
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	case VNIC_DEV_INTR_MODE_INTX:
 		err = vnic_dev_notify_set(enic->vdev,
@@ -1435,7 +1528,7 @@ static int enic_dev_notify_set(struct enic *enic)
 		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
 		break;
 	}
-	spin_unlock(&enic->devcmd_lock);
+	spin_unlock_bh(&enic->devcmd_lock);
 
 	return err;
 }
@@ -1494,15 +1587,20 @@ static int enic_open(struct net_device *netdev)
 
 	netif_tx_wake_all_queues(netdev);
 
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		enic_busy_poll_init_lock(&enic->rq[i]);
 		napi_enable(&enic->napi[i]);
-
+	}
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+		for (i = 0; i < enic->wq_count; i++)
+			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
 	enic_dev_enable(enic);
 
 	for (i = 0; i < enic->intr_count; i++)
 		vnic_intr_unmask(&enic->intr[i]);
 
 	enic_notify_timer_start(enic);
+	enic_rfs_flw_tbl_init(enic);
 
 	return 0;
 
@@ -1529,14 +1627,23 @@ static int enic_stop(struct net_device *netdev)
 
 	enic_synchronize_irqs(enic);
 	del_timer_sync(&enic->notify_timer);
+	enic_rfs_flw_tbl_free(enic);
 
 	enic_dev_disable(enic);
 
-	for (i = 0; i < enic->rq_count; i++)
+	local_bh_disable();
+	for (i = 0; i < enic->rq_count; i++) {
 		napi_disable(&enic->napi[i]);
+		while (!enic_poll_lock_napi(&enic->rq[i]))
+			mdelay(1);
+	}
+	local_bh_enable();
 
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+		for (i = 0; i < enic->wq_count; i++)
+			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
 
 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_del_station_addr(enic);
@@ -1656,13 +1763,14 @@ static void enic_poll_controller(struct net_device *netdev)
 	case VNIC_DEV_INTR_MODE_MSIX:
 		for (i = 0; i < enic->rq_count; i++) {
 			intr = enic_msix_rq_intr(enic, i);
-			enic_isr_msix_rq(enic->msix_entry[intr].vector,
-				&enic->napi[i]);
+			enic_isr_msix(enic->msix_entry[intr].vector,
+				      &enic->napi[i]);
 		}
 
 		for (i = 0; i < enic->wq_count; i++) {
 			intr = enic_msix_wq_intr(enic, i);
-			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+			enic_isr_msix(enic->msix_entry[intr].vector,
+				      &enic->napi[enic_cq_wq(enic, i)]);
 		}
 
 		break;
@@ -1758,11 +1866,11 @@ static int enic_set_rsskey(struct enic *enic)
 
 	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
 
-	spin_lock(&enic->devcmd_lock);
+	spin_lock_bh(&enic->devcmd_lock);
 	err = enic_set_rss_key(enic,
 		rss_key_buf_pa, sizeof(union vnic_rss_key));
-	spin_unlock(&enic->devcmd_lock);
+	spin_unlock_bh(&enic->devcmd_lock);
 
 	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
 		rss_key_buf_va, rss_key_buf_pa);
@@ -1785,11 +1893,11 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
 	for (i = 0; i < (1 << rss_hash_bits); i++)
 		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
 
-	spin_lock(&enic->devcmd_lock);
+	spin_lock_bh(&enic->devcmd_lock);
 	err = enic_set_rss_cpu(enic,
 		rss_cpu_buf_pa, sizeof(union vnic_rss_cpu));
-	spin_unlock(&enic->devcmd_lock);
+	spin_unlock_bh(&enic->devcmd_lock);
 
 	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
 		rss_cpu_buf_va, rss_cpu_buf_pa);
@@ -1807,13 +1915,13 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
 	/* Enable VLAN tag stripping.
 	*/
 
-	spin_lock(&enic->devcmd_lock);
+	spin_lock_bh(&enic->devcmd_lock);
 	err = enic_set_nic_cfg(enic,
 		rss_default_cpu, rss_hash_type,
 		rss_hash_bits, rss_base_cpu,
 		rss_enable, tso_ipid_split_en,
 		ig_vlan_strip_en);
-	spin_unlock(&enic->devcmd_lock);
+	spin_unlock_bh(&enic->devcmd_lock);
 
 	return err;
 }
@@ -2021,6 +2129,12 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= enic_rx_flow_steer,
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= enic_busy_poll,
+#endif
 };
 
 static const struct net_device_ops enic_netdev_ops = {
@@ -2041,14 +2155,25 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= enic_rx_flow_steer,
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= enic_busy_poll,
+#endif
 };
 
 static void enic_dev_deinit(struct enic *enic)
 {
 	unsigned int i;
 
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		napi_hash_del(&enic->napi[i]);
 		netif_napi_del(&enic->napi[i]);
+	}
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+		for (i = 0; i < enic->wq_count; i++)
+			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
 
 	enic_free_vnic_resources(enic);
 	enic_clear_intr_mode(enic);
@@ -2114,11 +2239,17 @@ static int enic_dev_init(struct enic *enic)
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	default:
 		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+		napi_hash_add(&enic->napi[0]);
 		break;
 	case VNIC_DEV_INTR_MODE_MSIX:
-		for (i = 0; i < enic->rq_count; i++)
+		for (i = 0; i < enic->rq_count; i++) {
 			netif_napi_add(netdev, &enic->napi[i],
-				enic_poll_msix, 64);
+				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+			napi_hash_add(&enic->napi[i]);
+		}
+		for (i = 0; i < enic->wq_count; i++)
+			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
+				       enic_poll_msix_wq, NAPI_POLL_WEIGHT);
 		break;
 	}
 
@@ -2386,6 +2517,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	netdev->features |= netdev->hw_features;
 
+#ifdef CONFIG_RFS_ACCEL
+	netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+
 	if (using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
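Note: the per-RQ busy-poll locking helpers called in this diff -- enic_busy_poll_init_lock(), enic_poll_lock_napi(), enic_poll_unlock_napi(), enic_poll_lock_poll(), enic_poll_unlock_poll() and enic_poll_busy_polling() -- are not defined in enic_main.c; they come from the enic header elsewhere in this series. The sketch below is only an illustration of how such a NAPI vs. socket-busy-poll ownership lock is commonly built around an atomic state word; the struct name (enic_rq_bpoll) and the state values are placeholders for this example, not the driver's actual definitions.

/*
 * Illustrative sketch (assumed helper layout, not the exact code from the
 * enic header): one atomic state word per receive queue records whether the
 * NAPI handler or a busy-polling socket currently owns the completion ring.
 */
#include <linux/atomic.h>
#include <linux/types.h>

#define ENIC_POLL_STATE_IDLE	0	/* nobody is servicing the RQ */
#define ENIC_POLL_STATE_NAPI	1	/* owned by the NAPI poll handler */
#define ENIC_POLL_STATE_POLL	2	/* owned by a busy-polling socket */

struct enic_rq_bpoll {			/* placeholder; the real state lives in the RQ */
	atomic_t state;
};

static inline void enic_busy_poll_init_lock(struct enic_rq_bpoll *p)
{
	atomic_set(&p->state, ENIC_POLL_STATE_IDLE);
}

/* NAPI tries to take the queue; fails while a busy-poll user holds it. */
static inline bool enic_poll_lock_napi(struct enic_rq_bpoll *p)
{
	return atomic_cmpxchg(&p->state, ENIC_POLL_STATE_IDLE,
			      ENIC_POLL_STATE_NAPI) == ENIC_POLL_STATE_IDLE;
}

static inline void enic_poll_unlock_napi(struct enic_rq_bpoll *p)
{
	atomic_set(&p->state, ENIC_POLL_STATE_IDLE);
}

/* The ndo_busy_poll path tries to take the queue; fails while NAPI holds it. */
static inline bool enic_poll_lock_poll(struct enic_rq_bpoll *p)
{
	return atomic_cmpxchg(&p->state, ENIC_POLL_STATE_IDLE,
			      ENIC_POLL_STATE_POLL) == ENIC_POLL_STATE_IDLE;
}

static inline void enic_poll_unlock_poll(struct enic_rq_bpoll *p)
{
	atomic_set(&p->state, ENIC_POLL_STATE_IDLE);
}

/* Checked on the RX completion path to skip GRO while busy polling. */
static inline bool enic_poll_busy_polling(struct enic_rq_bpoll *p)
{
	return atomic_read(&p->state) == ENIC_POLL_STATE_POLL;
}

With .ndo_busy_poll wired up this way, a socket opts in through the SO_BUSY_POLL socket option or the net.core.busy_read / net.core.busy_poll sysctls, and the socket layer calls enic_busy_poll() directly instead of sleeping until the RQ interrupt fires; the ownership state is what keeps that path and enic_poll_msix_rq() from servicing the same completion queue at once, and enic_stop() spins on enic_poll_lock_napi() until any in-flight busy poller has released the queue.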