Diffstat (limited to 'drivers/net/ethernet/chelsio')
 drivers/net/ethernet/chelsio/Kconfig                |   2
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h          |   9
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c      |  55
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c     |  74
 drivers/net/ethernet/chelsio/cxgb4/sge.c            | 223
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c          |  50
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.h          |   9
 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h         |   1
 drivers/net/ethernet/chelsio/cxgb4/t4_regs.h        |  25
 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 119
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c          |   5
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h  |   6
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c      |  10
 13 files changed, 450 insertions(+), 138 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index c3ce9df0041a..ac6473f75eb9 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -68,7 +68,7 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5 Ethernet support" - depends on PCI + depends on PCI && (IPV6 || IPV6=n) select FW_LOADER select MDIO ---help--- diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c067b7888ac4..3c481b260745 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -431,6 +431,7 @@ struct sge_fl { /* SGE free-buffer queue state */ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ __be64 *desc; /* address of HW Rx descriptor ring */ dma_addr_t addr; /* bus address of HW ring start */ + u64 udb; /* BAR2 offset of User Doorbell area */ }; /* A packet gather list */ @@ -451,6 +452,7 @@ struct sge_rspq { /* state for an SGE response queue */ u8 gen; /* current generation bit */ u8 intr_params; /* interrupt holdoff parameters */ u8 next_intr_params; /* holdoff params for next interrupt */ + u8 adaptive_rx; u8 pktcnt_idx; /* interrupt packet threshold */ u8 uld; /* ULD handling this queue */ u8 idx; /* queue index within its group */ @@ -459,6 +461,7 @@ struct sge_rspq { /* state for an SGE response queue */ u16 abs_id; /* absolute SGE id for the response q */ __be64 *desc; /* address of HW response ring */ dma_addr_t phys_addr; /* physical address of the ring */ + u64 udb; /* BAR2 offset of User Doorbell area */ unsigned int iqe_len; /* entry size */ unsigned int size; /* capacity of response queue */ struct adapter *adap; @@ -516,7 +519,7 @@ struct sge_txq { int db_disabled; unsigned short db_pidx; unsigned short db_pidx_inc; - u64 udb; + u64 udb; /* BAR2 offset of User Doorbell area */ }; struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ @@ -965,7 +968,7 @@ void t4_intr_enable(struct adapter *adapter); void t4_intr_disable(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter); -int t4_wait_dev_ready(struct adapter *adap); +int t4_wait_dev_ready(void __iomem *regs); int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc); int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); @@ -983,6 +986,8 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, int t4_seeprom_wp(struct adapter *adapter, bool enable); int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 8edf0f5bd679..6fe300e316c3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -60,6 +60,42 @@ void cxgb4_dcb_version_init(struct net_device *dev) dcb->dcb_version = FW_PORT_DCB_VER_AUTO; } +static void cxgb4_dcb_cleanup_apps(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + struct port_dcb_info *dcb = 
&pi->dcb; + struct dcb_app app; + int i, err; + + /* zero priority implies remove */ + app.priority = 0; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + /* Check if app list is exhausted */ + if (!dcb->app_priority[i].protocolid) + break; + + app.protocol = dcb->app_priority[i].protocolid; + + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.selector = dcb->app_priority[i].sel_field + 1; + err = dcb_ieee_setapp(dev, &app); + } else { + app.selector = !!(dcb->app_priority[i].sel_field); + err = dcb_setapp(dev, &app); + } + + if (err) { + dev_err(adap->pdev_dev, + "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n", + dcb_ver_array[dcb->dcb_version], app.selector, + app.protocol, -err); + break; + } + } +} + /* Finite State machine for Data Center Bridging. */ void cxgb4_dcb_state_fsm(struct net_device *dev, @@ -80,7 +116,6 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, /* we're going to use Host DCB */ dcb->state = CXGB4_DCB_STATE_HOST; dcb->supported = CXGB4_DCBX_HOST_SUPPORT; - dcb->enabled = 1; break; } @@ -145,6 +180,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, * state. We need to reset back to a ground state * of incomplete. */ + cxgb4_dcb_cleanup_apps(dev); cxgb4_dcb_state_init(dev); dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; dcb->supported = CXGB4_DCBX_FW_SUPPORT; @@ -349,6 +385,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled) { struct port_info *pi = netdev2pinfo(dev); + /* If DCBx is host-managed, dcb is enabled by outside lldp agents */ + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) { + pi->dcb.enabled = enabled; + return 0; + } + /* Firmware doesn't provide any mechanism to control the DCB state. */ if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED)) @@ -833,11 +875,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, /* Return whether IEEE Data Center Bridging has been negotiated. 
*/ -static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) +static inline int +cxgb4_ieee_negotiation_complete(struct net_device *dev, + enum cxgb4_dcb_fw_msgs dcb_subtype) { struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; + return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); } @@ -850,7 +897,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app) { int prio; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; @@ -872,7 +919,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app) { int ret; - if (!cxgb4_ieee_negotiation_complete(dev)) + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) return -EINVAL; if (!(app->selector && app->protocol)) return -EINVAL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e5be511a3c38..8520d5529df8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -283,6 +283,9 @@ static const struct pci_device_id cxgb4_pci_tbl[] = { CH_DEVICE(0x5083, 4), CH_DEVICE(0x5084, 4), CH_DEVICE(0x5085, 4), + CH_DEVICE(0x5086, 4), + CH_DEVICE(0x5087, 4), + CH_DEVICE(0x5088, 4), CH_DEVICE(0x5401, 4), CH_DEVICE(0x5402, 4), CH_DEVICE(0x5403, 4), @@ -310,6 +313,9 @@ static const struct pci_device_id cxgb4_pci_tbl[] = { CH_DEVICE(0x5483, 4), CH_DEVICE(0x5484, 4), CH_DEVICE(0x5485, 4), + CH_DEVICE(0x5486, 4), + CH_DEVICE(0x5487, 4), + CH_DEVICE(0x5488, 4), { 0, } }; @@ -688,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev) #ifdef CONFIG_CHELSIO_T4_DCB struct port_info *pi = netdev_priv(dev); - return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED; + if (!pi->dcb.enabled) + return 0; + + return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || + (pi->dcb.state == CXGB4_DCB_STATE_HOST)); #else return 0; #endif @@ -2747,8 +2757,31 @@ static int set_rx_intr_params(struct net_device *dev, return 0; } +static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx) +{ + int i; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (i = 0; i < pi->nqsets; i++, q++) + q->rspq.adaptive_rx = adaptive_rx; + + return 0; +} + +static int get_adaptive_rx_setting(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + return q->rspq.adaptive_rx; +} + static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { + set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); return set_rx_intr_params(dev, c->rx_coalesce_usecs, c->rx_max_coalesced_frames); } @@ -2762,6 +2795,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c->rx_coalesce_usecs = qtimer_val(adap, rq); c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? 
adap->sge.counter_val[rq->pktcnt_idx] : 0; + c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); return 0; } @@ -2899,16 +2933,26 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) int ret; const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); + unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1; ef->data[sizeof(ef->data) - 1] = '\0'; ret = request_firmware(&fw, ef->data, adap->pdev_dev); if (ret < 0) return ret; - ret = t4_load_fw(adap, fw->data, fw->size); + /* If the adapter has been fully initialized then we'll go ahead and + * try to get the firmware's cooperation in upgrading to the new + * firmware image otherwise we'll try to do the entire job from the + * host ... and we always "force" the operation in this path. + */ + if (adap->flags & FULL_INIT_DONE) + mbox = adap->mbox; + + ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); release_firmware(fw); if (!ret) - dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); + dev_info(adap->pdev_dev, "loaded firmware %s," + " reload cxgb4 driver\n", ef->data); return ret; } @@ -4329,6 +4373,7 @@ EXPORT_SYMBOL(cxgb4_unregister_uld); * success (true) if it belongs otherwise failure (false). * Called with rcu_read_lock() held. */ +#if IS_ENABLED(CONFIG_IPV6) static bool cxgb4_netdev(const struct net_device *netdev) { struct adapter *adap; @@ -4390,7 +4435,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, * bond. We need to find such different adapters and add clip * in all of them only once. */ - read_lock(&bond->lock); bond_for_each_slave(bond, slave, iter) { if (!first_pdev) { ret = clip_add(slave->dev, ifa, event); @@ -4404,7 +4448,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, to_pci_dev(slave->dev->dev.parent)) ret = clip_add(slave->dev, ifa, event); } - read_unlock(&bond->lock); } else ret = clip_add(ifa->idev->dev, ifa, event); @@ -4452,6 +4495,13 @@ static int update_root_dev_clip(struct net_device *dev) return ret; /* Parse all bond and vlan devices layered on top of the physical dev */ + root_dev = netdev_master_upper_dev_get_rcu(dev); + if (root_dev) { + ret = update_dev_clip(root_dev, dev); + if (ret) + return ret; + } + for (i = 0; i < VLAN_N_VID; i++) { root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i); if (!root_dev) @@ -4484,6 +4534,7 @@ static void update_clip(const struct adapter *adap) } rcu_read_unlock(); } +#endif /* IS_ENABLED(CONFIG_IPV6) */ /** * cxgb_up - enable the adapter @@ -4530,7 +4581,9 @@ static int cxgb_up(struct adapter *adap) t4_intr_enable(adap); adap->flags |= FULL_INIT_DONE; notify_ulds(adap, CXGB4_STATE_UP); +#if IS_ENABLED(CONFIG_IPV6) update_clip(adap); +#endif out: return err; irq_err: @@ -6109,7 +6162,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); pci_cleanup_aer_uncorrect_error_status(pdev); - if (t4_wait_dev_ready(adap) < 0) + if (t4_wait_dev_ready(adap->regs) < 0) return PCI_ERS_RESULT_DISCONNECT; if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) return PCI_ERS_RESULT_DISCONNECT; @@ -6502,6 +6555,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_device; } + err = t4_wait_dev_ready(regs); + if (err < 0) + goto out_unmap_bar0; + /* We control everything through one PF */ func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); if (func != ent->driver_data) { @@ -6557,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->stats_lock); 
spin_lock_init(&adapter->tid_release_lock); + spin_lock_init(&adapter->win0_lock); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); INIT_WORK(&adapter->db_full_task, process_db_full); @@ -6820,14 +6878,18 @@ static int __init cxgb4_init_module(void) if (ret < 0) debugfs_remove(cxgb4_debugfs_root); +#if IS_ENABLED(CONFIG_IPV6) register_inet6addr_notifier(&cxgb4_inet6addr_notifier); +#endif return ret; } static void __exit cxgb4_cleanup_module(void) { +#if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); +#endif pci_unregister_driver(&cxgb4_driver); debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index d22d728d4e5c..5e1b314e11af 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -203,6 +203,9 @@ enum { RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ }; +static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5}; +#define MIN_NAPI_WORK 1 + static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) { return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; @@ -521,9 +524,23 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) val = PIDX(q->pend_cred / 8); if (!is_t4(adap->params.chip)) val |= DBTYPE(1); + val |= DBPRIO(1); wmb(); - t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | - QID(q->cntxt_id) | val); + + /* If we're on T4, use the old doorbell mechanism; otherwise + * use the new BAR2 mechanism. + */ + if (is_t4(adap->params.chip)) { + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + val | QID(q->cntxt_id)); + } else { + writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); + + /* This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. + */ + wmb(); + } q->pend_cred &= 7; } } @@ -833,13 +850,14 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, *end = 0; } -/* This function copies 64 byte coalesced work request to - * memory mapped BAR2 space(user space writes). - * For coalesced WR SGE, fetches data from the FIFO instead of from Host. +/* This function copies a tx_desc struct to memory mapped BAR2 space(user space + * writes). For coalesced WR SGE, fetches data from the FIFO instead of from + * Host. */ -static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) +static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc) { - int count = 8; + int count = sizeof(*desc) / sizeof(u64); + u64 *src = (u64 *)desc; while (count) { writeq(*src, dst); @@ -859,30 +877,63 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) */ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { - unsigned int *wr, index; - unsigned long flags; - wmb(); /* write descriptors before telling HW */ - spin_lock_irqsave(&q->db_lock, flags); - if (!q->db_disabled) { - if (is_t4(adap->params.chip)) { + + if (is_t4(adap->params.chip)) { + u32 val = PIDX(n); + unsigned long flags; + + /* For T4 we need to participate in the Doorbell Recovery + * mechanism. 
+ */ + spin_lock_irqsave(&q->db_lock, flags); + if (!q->db_disabled) t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), - QID(q->cntxt_id) | PIDX(n)); + QID(q->cntxt_id) | val); + else + q->db_pidx_inc += n; + q->db_pidx = q->pidx; + spin_unlock_irqrestore(&q->db_lock, flags); + } else { + u32 val = PIDX_T5(n); + + /* T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & DBPRIO(1)); + + /* For T5 and later we use the Write-Combine mapped BAR2 User + * Doorbell mechanism. If we're only writing a single TX + * Descriptor and TX Write Combining hasn't been disabled, we + * can use the Write Combining Gather Buffer; otherwise we use + * the simple doorbell. + */ + if (n == 1) { + int index = (q->pidx + ? (q->pidx - 1) + : (q->size - 1)); + + cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL, + q->desc + index); } else { - if (n == 1) { - index = q->pidx ? (q->pidx - 1) : (q->size - 1); - wr = (unsigned int *)&q->desc[index]; - cxgb_pio_copy((u64 __iomem *) - (adap->bar2 + q->udb + 64), - (u64 *)wr); - } else - writel(n, adap->bar2 + q->udb + 8); - wmb(); + writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL); } - } else - q->db_pidx_inc += n; - q->db_pidx = q->pidx; - spin_unlock_irqrestore(&q->db_lock, flags); + + /* This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + wmb(); + } } /** @@ -1072,7 +1123,10 @@ out_free: dev_kfree_skb_any(skb); lso->c.ipid_ofst = htons(0); lso->c.mss = htons(ssi->gso_size); lso->c.seqno_offset = htonl(0); - lso->c.len = htonl(skb->len); + if (is_t4(adap->params.chip)) + lso->c.len = htonl(skb->len); + else + lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len)); cpl = (void *)(lso + 1); cntrl = TXPKT_CSUM_TYPE(v6 ? 
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN(l3hdr_len) | @@ -1916,16 +1970,40 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) unsigned int params; struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); int work_done = process_responses(q, budget); + u32 val; if (likely(work_done < budget)) { + int timer_index; + napi_complete(napi); - params = q->next_intr_params; - q->next_intr_params = q->intr_params; + timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params); + + if (q->adaptive_rx) { + if (work_done > max(timer_pkt_quota[timer_index], + MIN_NAPI_WORK)) + timer_index = (timer_index + 1); + else + timer_index = timer_index - 1; + + timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); + q->next_intr_params = QINTR_TIMER_IDX(timer_index) | + V_QINTR_CNT_EN; + params = q->next_intr_params; + } else { + params = q->next_intr_params; + q->next_intr_params = q->intr_params; + } } else params = QINTR_TIMER_IDX(7); - t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) | - INGRESSQID((u32)q->cntxt_id) | SEINTARM(params)); + val = CIDXINC(work_done) | SEINTARM(params); + if (is_t4(q->adap->params.chip)) { + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), + val | INGRESSQID((u32)q->cntxt_id)); + } else { + writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS); + wmb(); + } return work_done; } @@ -1949,6 +2027,7 @@ static unsigned int process_intrq(struct adapter *adap) unsigned int credits; const struct rsp_ctrl *rc; struct sge_rspq *q = &adap->sge.intrq; + u32 val; spin_lock(&adap->sge.intrq_lock); for (credits = 0; ; credits++) { @@ -1967,8 +2046,14 @@ static unsigned int process_intrq(struct adapter *adap) rspq_next(q); } - t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) | - INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params)); + val = CIDXINC(credits) | SEINTARM(q->intr_params); + if (is_t4(adap->params.chip)) { + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), + val | INGRESSQID(q->cntxt_id)); + } else { + writel(val, adap->bar2 + q->udb + SGE_UDB_GTS); + wmb(); + } spin_unlock(&adap->sge.intrq_lock); return credits; } @@ -2149,6 +2234,51 @@ static void sge_tx_timer_cb(unsigned long data) mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); } +/** + * udb_address - return the BAR2 User Doorbell address for a Queue + * @adap: the adapter + * @cntxt_id: the Queue Context ID + * @qpp: Queues Per Page (for all PFs) + * + * Returns the BAR2 address of the user Doorbell associated with the + * indicated Queue Context ID. Note that this is only applicable + * for T5 and later. 
+ */ +static u64 udb_address(struct adapter *adap, unsigned int cntxt_id, + unsigned int qpp) +{ + u64 udb; + unsigned int s_qpp; + unsigned short udb_density; + unsigned long qpshift; + int page; + + BUG_ON(is_t4(adap->params.chip)); + + s_qpp = (QUEUESPERPAGEPF0 + + (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn); + udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); + qpshift = PAGE_SHIFT - ilog2(udb_density); + udb = (u64)cntxt_id << qpshift; + udb &= PAGE_MASK; + page = udb / PAGE_SIZE; + udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE; + + return udb; +} + +static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id) +{ + return udb_address(adap, cntxt_id, + t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); +} + +static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id) +{ + return udb_address(adap, cntxt_id, + t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); +} + int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, struct net_device *dev, int intr_idx, struct sge_fl *fl, rspq_handler_t hnd) @@ -2214,6 +2344,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->next_intr_params = iq->intr_params; iq->cntxt_id = ntohs(c.iqid); iq->abs_id = ntohs(c.physiqid); + if (!is_t4(adap->params.chip)) + iq->udb = udb_address_iq(adap, iq->cntxt_id); iq->size--; /* subtract status entry */ iq->netdev = dev; iq->handler = hnd; @@ -2229,6 +2361,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->pidx = fl->cidx = 0; fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; + + /* Note, we must initialize the Free List User Doorbell + * address before refilling the Free List! 
+ */ + if (!is_t4(adap->params.chip)) + fl->udb = udb_address_eq(adap, fl->cntxt_id); refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); } return 0; @@ -2254,21 +2392,8 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { q->cntxt_id = id; - if (!is_t4(adap->params.chip)) { - unsigned int s_qpp; - unsigned short udb_density; - unsigned long qpshift; - int page; - - s_qpp = QUEUESPERPAGEPF1 * adap->fn; - udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap, - SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)); - qpshift = PAGE_SHIFT - ilog2(udb_density); - q->udb = q->cntxt_id << qpshift; - q->udb &= PAGE_MASK; - page = q->udb / PAGE_SIZE; - q->udb += (q->cntxt_id - (page * udb_density)) * 128; - } + if (!is_t4(adap->params.chip)) + q->udb = udb_address_eq(adap, q->cntxt_id); q->in_use = 0; q->cidx = q->pidx = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 41d04462b72e..a9d9d74e4f09 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -37,8 +37,6 @@ #include "t4_regs.h" #include "t4fw_api.h" -static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, - const u8 *fw_data, unsigned int size, int force); /** * t4_wait_op_done_val - wait until an operation is completed * @adapter: the adapter performing the operation @@ -1099,6 +1097,9 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) { int ret = 0; + if (end >= adapter->params.sf_nsec) + return -EINVAL; + while (start <= end) { if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, @@ -3073,8 +3074,8 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) * positive errno indicates that the adapter is ~probably~ intact, a * negative errno indicates that things are looking bad ... */ -static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, - const u8 *fw_data, unsigned int size, int force) +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force) { const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; int reset, ret; @@ -3842,16 +3843,35 @@ static void init_link_config(struct link_config *lc, unsigned int caps) } } -int t4_wait_dev_ready(struct adapter *adap) +#define CIM_PF_NOACCESS 0xeeeeeeee + +int t4_wait_dev_ready(void __iomem *regs) { - if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) + u32 whoami; + + whoami = readl(regs + PL_WHOAMI); + if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS) return 0; + msleep(500); - return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; + whoami = readl(regs + PL_WHOAMI); + return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO); } +struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; +}; + static int get_flash_params(struct adapter *adap) { + /* Table for non-Numonix supported flash parts. Numonix parts are left + * to the preexisting code. All flash parts have 64KB sectors. 
+ */ + static struct flash_desc supported_flash[] = { + { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + int ret; u32 info; @@ -3862,6 +3882,14 @@ static int get_flash_params(struct adapter *adap) if (ret) return ret; + for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) + if (supported_flash[ret].vendor_and_model_id == info) { + adap->params.sf_size = supported_flash[ret].size_mb; + adap->params.sf_nsec = + adap->params.sf_size / SF_SEC_SIZE; + return 0; + } + if ((info & 0xff) != 0x20) /* not a Numonix flash */ return -EINVAL; info >>= 16; /* log2 of size */ @@ -3874,6 +3902,10 @@ static int get_flash_params(struct adapter *adap) adap->params.sf_size = 1 << info; adap->params.sf_fw_start = t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; + + if (adap->params.sf_size < FLASH_MIN_SIZE) + dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", + adap->params.sf_size, FLASH_MIN_SIZE); return 0; } @@ -3892,10 +3924,6 @@ int t4_prep_adapter(struct adapter *adapter) uint16_t device_id; u32 pl_rev; - ret = t4_wait_dev_ready(adapter); - if (ret < 0) - return ret; - get_pci_mode(adapter, &adapter->params.pci); pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 35e3d8e32881..c19a90e7f7d1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -135,6 +135,7 @@ struct rsp_ctrl { #define RSPD_GEN(x) ((x) >> 7) #define RSPD_TYPE(x) (((x) >> 4) & 3) +#define V_QINTR_CNT_EN 0x0 #define QINTR_CNT_EN 0x1 #define QINTR_TIMER_IDX(x) ((x) << 1) #define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) @@ -175,7 +176,7 @@ enum { * Location of firmware image in FLASH. */ FLASH_FW_START_SEC = 8, - FLASH_FW_NSECS = 8, + FLASH_FW_NSECS = 16, FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), @@ -206,6 +207,12 @@ enum { FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + /* We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal + * operations. 
+ */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, + FLASH_FPGA_CFG_START_SEC = 15, FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC), diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 52e08103f221..5f4db2398c71 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -527,6 +527,7 @@ struct cpl_tx_pkt_lso_core { #define LSO_LAST_SLICE (1 << 22) #define LSO_FIRST_SLICE (1 << 23) #define LSO_OPCODE(x) ((x) << 24) +#define LSO_T5_XFER_SIZE(x) ((x) << 0) __be16 ipid_ofst; __be16 mss; __be32 seqno_offset; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 39fb325474f7..a1024db5dc13 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -72,11 +72,11 @@ #define PIDX_MASK 0x00003fffU #define PIDX_SHIFT 0 #define PIDX(x) ((x) << PIDX_SHIFT) -#define S_PIDX_T5 0 -#define M_PIDX_T5 0x1fffU -#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) +#define PIDX_SHIFT_T5 0 +#define PIDX_T5(x) ((x) << PIDX_SHIFT_T5) +#define SGE_TIMERREGS 6 #define SGE_PF_GTS 0x4 #define INGRESSQID_MASK 0xffff0000U #define INGRESSQID_SHIFT 16 @@ -157,8 +157,27 @@ #define QUEUESPERPAGEPF0_MASK 0x0000000fU #define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) +#define QUEUESPERPAGEPF0 0 #define QUEUESPERPAGEPF1 4 +/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues. + * The User Doorbells are each 128 bytes in length with a Simple Doorbell at + * offsets 8x and a Write Combining single 64-byte Egress Queue Unit + * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues, + * we have a Going To Sleep register at offsets 8x+4. + * + * As noted above, we have many instances of the Simple Doorbell and Going To + * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a + * non-64-byte aligned offset for the Simple Doorbell in order to attempt to + * avoid buffering of the writes to the Simple Doorbell and we want to use a + * non-contiguous offset for the Going To Sleep writes in order to avoid + * possible combining between them. + */ +#define SGE_UDB_SIZE 128 +#define SGE_UDB_KDOORBELL 8 +#define SGE_UDB_GTS 20 +#define SGE_UDB_WCDOORBELL 64 + #define SGE_INT_CAUSE1 0x1024 #define SGE_INT_CAUSE2 0x1030 #define SGE_INT_CAUSE3 0x103c diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2102a4c91737..0b42bddaf284 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -163,15 +163,19 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) netif_carrier_on(dev); switch (pi->link_cfg.speed) { - case SPEED_10000: + case 40000: + s = "40Gbps"; + break; + + case 10000: s = "10Gbps"; break; - case SPEED_1000: + case 1000: s = "1000Mbps"; break; - case SPEED_100: + case 100: s = "100Mbps"; break; @@ -2351,7 +2355,7 @@ static void cfg_queues(struct adapter *adapter) struct port_info *pi = adap2pinfo(adapter, pidx); pi->first_qset = qidx; - pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; qidx += pi->nqsets; } s->ethqsets = qidx; @@ -2907,61 +2911,62 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) /* * PCI Device registration data structures. 
*/ -#define CH_DEVICE(devid, idx) \ - { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } +#define CH_DEVICE(devid) \ + { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 } static const struct pci_device_id cxgb4vf_pci_tbl[] = { - CH_DEVICE(0xb000, 0), /* PE10K FPGA */ - CH_DEVICE(0x4800, 0), /* T440-dbg */ - CH_DEVICE(0x4801, 0), /* T420-cr */ - CH_DEVICE(0x4802, 0), /* T422-cr */ - CH_DEVICE(0x4803, 0), /* T440-cr */ - CH_DEVICE(0x4804, 0), /* T420-bch */ - CH_DEVICE(0x4805, 0), /* T440-bch */ - CH_DEVICE(0x4806, 0), /* T460-ch */ - CH_DEVICE(0x4807, 0), /* T420-so */ - CH_DEVICE(0x4808, 0), /* T420-cx */ - CH_DEVICE(0x4809, 0), /* T420-bt */ - CH_DEVICE(0x480a, 0), /* T404-bt */ - CH_DEVICE(0x480d, 0), /* T480-cr */ - CH_DEVICE(0x480e, 0), /* T440-lp-cr */ - CH_DEVICE(0x4880, 0), - CH_DEVICE(0x4880, 1), - CH_DEVICE(0x4880, 2), - CH_DEVICE(0x4880, 3), - CH_DEVICE(0x4880, 4), - CH_DEVICE(0x4880, 5), - CH_DEVICE(0x4880, 6), - CH_DEVICE(0x4880, 7), - CH_DEVICE(0x4880, 8), - CH_DEVICE(0x5800, 0), /* T580-dbg */ - CH_DEVICE(0x5801, 0), /* T520-cr */ - CH_DEVICE(0x5802, 0), /* T522-cr */ - CH_DEVICE(0x5803, 0), /* T540-cr */ - CH_DEVICE(0x5804, 0), /* T520-bch */ - CH_DEVICE(0x5805, 0), /* T540-bch */ - CH_DEVICE(0x5806, 0), /* T540-ch */ - CH_DEVICE(0x5807, 0), /* T520-so */ - CH_DEVICE(0x5808, 0), /* T520-cx */ - CH_DEVICE(0x5809, 0), /* T520-bt */ - CH_DEVICE(0x580a, 0), /* T504-bt */ - CH_DEVICE(0x580b, 0), /* T520-sr */ - CH_DEVICE(0x580c, 0), /* T504-bt */ - CH_DEVICE(0x580d, 0), /* T580-cr */ - CH_DEVICE(0x580e, 0), /* T540-lp-cr */ - CH_DEVICE(0x580f, 0), /* Amsterdam */ - CH_DEVICE(0x5810, 0), /* T580-lp-cr */ - CH_DEVICE(0x5811, 0), /* T520-lp-cr */ - CH_DEVICE(0x5812, 0), /* T560-cr */ - CH_DEVICE(0x5813, 0), /* T580-cr */ - CH_DEVICE(0x5814, 0), /* T580-so-cr */ - CH_DEVICE(0x5815, 0), /* T502-bt */ - CH_DEVICE(0x5880, 0), - CH_DEVICE(0x5881, 0), - CH_DEVICE(0x5882, 0), - CH_DEVICE(0x5883, 0), - CH_DEVICE(0x5884, 0), - CH_DEVICE(0x5885, 0), + CH_DEVICE(0xb000), /* PE10K FPGA */ + CH_DEVICE(0x4801), /* T420-cr */ + CH_DEVICE(0x4802), /* T422-cr */ + CH_DEVICE(0x4803), /* T440-cr */ + CH_DEVICE(0x4804), /* T420-bch */ + CH_DEVICE(0x4805), /* T440-bch */ + CH_DEVICE(0x4806), /* T460-ch */ + CH_DEVICE(0x4807), /* T420-so */ + CH_DEVICE(0x4808), /* T420-cx */ + CH_DEVICE(0x4809), /* T420-bt */ + CH_DEVICE(0x480a), /* T404-bt */ + CH_DEVICE(0x480d), /* T480-cr */ + CH_DEVICE(0x480e), /* T440-lp-cr */ + CH_DEVICE(0x4880), + CH_DEVICE(0x4881), + CH_DEVICE(0x4882), + CH_DEVICE(0x4883), + CH_DEVICE(0x4884), + CH_DEVICE(0x4885), + CH_DEVICE(0x4886), + CH_DEVICE(0x4887), + CH_DEVICE(0x4888), + CH_DEVICE(0x5801), /* T520-cr */ + CH_DEVICE(0x5802), /* T522-cr */ + CH_DEVICE(0x5803), /* T540-cr */ + CH_DEVICE(0x5804), /* T520-bch */ + CH_DEVICE(0x5805), /* T540-bch */ + CH_DEVICE(0x5806), /* T540-ch */ + CH_DEVICE(0x5807), /* T520-so */ + CH_DEVICE(0x5808), /* T520-cx */ + CH_DEVICE(0x5809), /* T520-bt */ + CH_DEVICE(0x580a), /* T504-bt */ + CH_DEVICE(0x580b), /* T520-sr */ + CH_DEVICE(0x580c), /* T504-bt */ + CH_DEVICE(0x580d), /* T580-cr */ + CH_DEVICE(0x580e), /* T540-lp-cr */ + CH_DEVICE(0x580f), /* Amsterdam */ + CH_DEVICE(0x5810), /* T580-lp-cr */ + CH_DEVICE(0x5811), /* T520-lp-cr */ + CH_DEVICE(0x5812), /* T560-cr */ + CH_DEVICE(0x5813), /* T580-cr */ + CH_DEVICE(0x5814), /* T580-so-cr */ + CH_DEVICE(0x5815), /* T502-bt */ + CH_DEVICE(0x5880), + CH_DEVICE(0x5881), + CH_DEVICE(0x5882), + CH_DEVICE(0x5883), + CH_DEVICE(0x5884), + CH_DEVICE(0x5885), + 
CH_DEVICE(0x5886), + CH_DEVICE(0x5887), + CH_DEVICE(0x5888), { 0, } }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index a5fb9493dee8..85036e6b42c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1208,7 +1208,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) lso->ipid_ofst = cpu_to_be16(0); lso->mss = cpu_to_be16(ssi->gso_size); lso->seqno_offset = cpu_to_be32(0); - lso->len = cpu_to_be32(skb->len); + if (is_t4(adapter->params.chip)) + lso->len = cpu_to_be32(skb->len); + else + lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len)); /* * Set up TX Packet CPL pointer, control word and perform diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index f412d0fa0850..95df61dcb4ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -228,6 +228,12 @@ static inline bool is_10g_port(const struct link_config *lc) return (lc->supported & SUPPORTED_10000baseT_Full) != 0; } +static inline bool is_x_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || + (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; +} + static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) { return adapter->params.vpd.cclk / 1000; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 25dfeb8f28ed..e984fdc48ba2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -327,6 +327,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx) v |= SUPPORTED_1000baseT_Full; if (word & FW_PORT_CAP_SPEED_10G) v |= SUPPORTED_10000baseT_Full; + if (word & FW_PORT_CAP_SPEED_40G) + v |= SUPPORTED_40000baseSR4_Full; if (word & FW_PORT_CAP_ANEG) v |= SUPPORTED_Autoneg; init_link_config(&pi->link_cfg, v); @@ -1352,11 +1354,13 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) if (word & FW_PORT_CMD_TXPAUSE) fc |= PAUSE_TX; if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) - speed = SPEED_100; + speed = 100; else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) - speed = SPEED_1000; + speed = 1000; else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) - speed = SPEED_10000; + speed = 10000; + else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + speed = 40000; /* * Scan all of our "ports" (Virtual Interfaces) looking for |
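
Editor's note: for readers unfamiliar with the SGE register layout, the arithmetic behind the new udb_address() helper (and the SGE_UDB_* offsets added in t4_regs.h) can be exercised outside the driver. The sketch below is not part of the patch: it assumes 4 KiB pages and takes the log2 of the queues-per-page setting directly as a parameter (in the driver that value is extracted from the SGE_EGRESS/INGRESS_QUEUES_PER_PAGE_PF register for the current PF); the helper names udb_offset() and ilog2_u32() are invented for the example.

/* Standalone illustration (not part of the patch) of the T5+ BAR2 User
 * Doorbell offset arithmetic introduced by udb_address(): each queue owns
 * a 128-byte doorbell region, and several queues share one BAR2 page
 * depending on the SGE queues-per-page configuration.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT   12                        /* assume 4 KiB pages */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_MASK    (~((uint64_t)PAGE_SIZE - 1))
#define SGE_UDB_SIZE 128                       /* BAR2 doorbell bytes per queue */

static int ilog2_u32(uint32_t v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

/* qpp_log2: log2 of queues-per-page, as read from the SGE register field */
static uint64_t udb_offset(unsigned int cntxt_id, unsigned int qpp_log2)
{
	unsigned int udb_density = 1u << qpp_log2;          /* queues per page */
	unsigned long qpshift = PAGE_SHIFT - ilog2_u32(udb_density);
	uint64_t udb = (uint64_t)cntxt_id << qpshift;
	unsigned long page;

	udb &= PAGE_MASK;              /* BAR2 page holding this queue's doorbell */
	page = udb / PAGE_SIZE;        /* index of that page */
	/* slot of the queue within its page, 128 bytes per doorbell */
	udb += (cntxt_id - page * udb_density) * SGE_UDB_SIZE;
	return udb;
}

int main(void)
{
	/* e.g. 32 queues per page (qpp_log2 = 5), queue context ID 37 */
	printf("BAR2 UDB offset: %#llx\n",
	       (unsigned long long)udb_offset(37, 5));
	return 0;
}

With 32 queues per 4 KiB page the whole page is consumed and the offset degenerates to cntxt_id * 128; at lower densities only the first queues-per-page doorbell slots of each page are used. The Simple Doorbell, Going To Sleep and Write-Combining Gather Buffer registers then sit at SGE_UDB_KDOORBELL, SGE_UDB_GTS and SGE_UDB_WCDOORBELL within that per-queue region, as described in the t4_regs.h comment above.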