Diffstat (limited to 'drivers/net/ethernet/chelsio')
23 files changed, 1504 insertions, 133 deletions
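Many of the hunks below only drop explicit casts and open-coded memset() calls in favour of the skb helpers that now return void * (__skb_put(), skb_push(), __skb_put_zero(), __skb_put_data()). A minimal kernel-style sketch of that conversion, assuming <linux/skbuff.h> and the driver-local struct cpl_l2t_write_req from t4_msg.h, shown purely for illustration and not taken verbatim from the series:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include "t4_msg.h"		/* driver-local: struct cpl_l2t_write_req */

    /* Old pattern: cast the returned pointer and zero the request by hand. */
    static void fill_l2t_req_old(struct sk_buff *skb)
    {
    	struct cpl_l2t_write_req *req;

    	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
    	memset(req, 0, sizeof(*req));
    	/* ... fill request fields ... */
    }

    /* New pattern: __skb_put_zero() zeroes the area and returns void *,
     * so neither the cast nor the separate memset() is needed.
     */
    static void fill_l2t_req_new(struct sk_buff *skb)
    {
    	struct cpl_l2t_write_req *req;

    	req = __skb_put_zero(skb, sizeof(*req));
    	/* ... fill request fields ... */
    }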
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index d56142b98534..0f13a7f7c1d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1801,7 +1801,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) eth_type = skb_network_offset(skb) == ETH_HLEN ? CPL_ETH_II : CPL_ETH_II_VLAN; - hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); + hdr = skb_push(skb, sizeof(*hdr)); hdr->opcode = CPL_TX_PKT_LSO; hdr->ip_csum_dis = hdr->l4_csum_dis = 0; hdr->ip_hdr_words = ip_hdr(skb)->ihl; @@ -1849,7 +1849,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); + cpl = __skb_push(skb, sizeof(*cpl)); cpl->opcode = CPL_TX_PKT; cpl->ip_csum_dis = 1; /* SW calculates IP csum */ cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 2ff6bd139c96..0bc6a4ffce30 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -471,8 +471,7 @@ static int init_tp_parity(struct adapter *adap) if (!skb) goto alloc_skb_fail; - req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = __skb_put_zero(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); req->mtu_idx = NMTUS - 1; @@ -495,8 +494,7 @@ static int init_tp_parity(struct adapter *adap) if (!skb) goto alloc_skb_fail; - req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = __skb_put_zero(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); req->params = htonl(V_L2T_W_IDX(i)); @@ -518,8 +516,7 @@ static int init_tp_parity(struct adapter *adap) if (!skb) goto alloc_skb_fail; - req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = __skb_put_zero(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); req->l2t_idx = htonl(V_L2T_W_IDX(i)); @@ -538,8 +535,7 @@ static int init_tp_parity(struct adapter *adap) if (!skb) goto alloc_skb_fail; - greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq)); - memset(greq, 0, sizeof(*greq)); + greq = __skb_put_zero(skb, sizeof(*greq)); greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); greq->mask = cpu_to_be64(1); @@ -909,7 +905,7 @@ static int write_smt_entry(struct adapter *adapter, int idx) if (!skb) return -ENOMEM; - req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); + req = __skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ @@ -952,7 +948,7 @@ static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, if (!skb) return -ENOMEM; - req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; req->sched = sched; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 
b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index fa81445e334c..50cd660732c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -552,7 +552,7 @@ static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid) struct cpl_tid_release *req; skb->priority = CPL_PRIORITY_SETUP; - req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + req = __skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); } @@ -1096,7 +1096,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) return; } skb->priority = CPL_PRIORITY_CONTROL; - req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); + req = skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 26264125865f..248e40c6966c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -96,7 +96,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb, return -ENOMEM; } - req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + req = __skb_put(skb, sizeof(*req)); req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx)); req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) | diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 1b9d154f1149..e2d342647b19 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2282,7 +2282,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs, if (!skb) goto no_mem; - memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); + __skb_put_data(skb, r, AN_PKT_SIZE); skb->data[0] = CPL_ASYNC_NOTIF; rss_hi = htonl(CPL_ASYNC_NOTIF << 24); q->async_notif++; diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index c6b71f656992..817212702f0a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o cxgb4_ptp.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index e88c1808e46f..ef4be781fd05 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -48,6 +48,8 @@ #include <linux/vmalloc.h> #include <linux/etherdevice.h> #include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/ptp_classify.h> #include <asm/io.h> #include "t4_chip_type.h" #include "cxgb4_uld.h" @@ -362,6 +364,11 @@ struct adapter_params { unsigned int max_ordird_qp; /* Max read depth per RDMA QP */ unsigned int max_ird_adapter; /* Max read depth per adapter */ bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ + + /* MPS 
Buffer Group Map[per Port]. Bit i is set if buffer group i is + * used by the Port + */ + u8 mps_bg_map[MAX_NPORTS]; /* MPS Buffer Group Map */ }; /* State needed to monitor the forward progress of SGE Ingress DMA activities @@ -505,6 +512,7 @@ struct port_info { #endif /* CONFIG_CHELSIO_T4_FCOE */ bool rxtstamp; /* Enable TS */ struct hwtstamp_config tstamp_config; + bool ptp_enable; struct sched_table *sched_tbl; }; @@ -700,6 +708,7 @@ struct sge_uld_txq_info { struct sge { struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_eth_txq ptptxq; struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; @@ -777,6 +786,7 @@ struct uld_msix_info { struct vf_info { unsigned char vf_mac_addr[ETH_ALEN]; + unsigned int tx_rate; bool pf_set_mac; }; @@ -863,11 +873,17 @@ struct adapter { * used for all 4 filters. */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct sk_buff *ptp_tx_skb; + /* ptp lock */ + spinlock_t ptp_lock; spinlock_t stats_lock; spinlock_t win0_lock ____cacheline_aligned_in_smp; /* TC u32 offload */ struct cxgb4_tc_u32_table *tc_u32; + struct chcr_stats_debug chcr_stats; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1433,7 +1449,8 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, u32 t4_read_rss_pf_map(struct adapter *adapter); u32 t4_read_rss_pf_mask(struct adapter *adapter); -unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx); +unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx); +unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx); void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, @@ -1493,9 +1510,12 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox); int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val); +int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, - u32 *val, int rw); + u32 *val, int rw, bool sleep_ok); int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, @@ -1551,6 +1571,7 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); +int t4_update_port_info(struct port_info *pi); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 1fa34b009891..76540b0e082d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2669,6 +2669,8 @@ static int tid_info_show(struct seq_file *seq, void *v) if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) { unsigned int sb; + seq_printf(seq, "Connections in use: %u\n", + atomic_read(&t->conns_in_use)); if (chip <= 
CHELSIO_T5) sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4; @@ -2699,17 +2701,23 @@ static int tid_info_show(struct seq_file *seq, void *v) atomic_read(&t->hash_tids_in_use)); } } else if (t->ntids) { + seq_printf(seq, "Connections in use: %u\n", + atomic_read(&t->conns_in_use)); + seq_printf(seq, "TID range: 0..%u", t->ntids - 1); seq_printf(seq, ", in use: %u\n", atomic_read(&t->tids_in_use)); } if (t->nstids) - seq_printf(seq, "STID range: %u..%u, in use: %u\n", + seq_printf(seq, "STID range: %u..%u, in use-IPv4/IPv6: %u/%u\n", (!t->stid_base && (chip <= CHELSIO_T5)) ? t->stid_base + 1 : t->stid_base, - t->stid_base + t->nstids - 1, t->stids_in_use); + t->stid_base + t->nstids - 1, + t->stids_in_use - t->v6_stids_in_use, + t->v6_stids_in_use); + if (t->natids) seq_printf(seq, "ATID range: 0..%u, in use: %u\n", t->natids - 1, t->atids_in_use); @@ -3069,6 +3077,40 @@ static const struct file_operations meminfo_fops = { .llseek = seq_lseek, .release = single_release, }; + +static int chcr_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + + seq_puts(seq, "Chelsio Crypto Accelerator Stats \n"); + seq_printf(seq, "Cipher Ops: %10u \n", + atomic_read(&adap->chcr_stats.cipher_rqst)); + seq_printf(seq, "Digest Ops: %10u \n", + atomic_read(&adap->chcr_stats.digest_rqst)); + seq_printf(seq, "Aead Ops: %10u \n", + atomic_read(&adap->chcr_stats.aead_rqst)); + seq_printf(seq, "Completion: %10u \n", + atomic_read(&adap->chcr_stats.complete)); + seq_printf(seq, "Error: %10u \n", + atomic_read(&adap->chcr_stats.error)); + seq_printf(seq, "Fallback: %10u \n", + atomic_read(&adap->chcr_stats.fallback)); + return 0; +} + + +static int chcr_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, chcr_show, inode->i_private); +} + +static const struct file_operations chcr_stats_debugfs_fops = { + .owner = THIS_MODULE, + .open = chcr_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; /* Add an array of Debug FS files. */ void add_debugfs_files(struct adapter *adap, @@ -3143,6 +3185,7 @@ int t4_setup_debugfs(struct adapter *adap) { "tids", &tid_info_debugfs_fops, S_IRUSR, 0}, { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, { "meminfo", &meminfo_fops, S_IRUSR, 0 }, + { "crypto", &chcr_stats_debugfs_fops, S_IRUSR, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 0ba7866c8259..26eb00a45db1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -500,7 +500,11 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, } else if (port_type == FW_PORT_TYPE_SFP || port_type == FW_PORT_TYPE_QSFP_10G || port_type == FW_PORT_TYPE_QSA || - port_type == FW_PORT_TYPE_QSFP) { + port_type == FW_PORT_TYPE_QSFP || + port_type == FW_PORT_TYPE_CR4_QSFP || + port_type == FW_PORT_TYPE_CR_QSFP || + port_type == FW_PORT_TYPE_CR2_QSFP || + port_type == FW_PORT_TYPE_SFP28) { if (mod_type == FW_PORT_MOD_TYPE_LR || mod_type == FW_PORT_MOD_TYPE_SR || mod_type == FW_PORT_MOD_TYPE_ER || @@ -511,6 +515,9 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, return PORT_DA; else return PORT_OTHER; + } else if (port_type == FW_PORT_TYPE_KR4_100G || + port_type == FW_PORT_TYPE_KR_SFP28) { + return PORT_NONE; } return PORT_OTHER; @@ -618,7 +625,21 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_SFP28: SET_LMM(FIBRE); - SET_LMM(25000baseCR_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); + break; + + case FW_PORT_TYPE_KR_SFP28: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full); + break; + + case FW_PORT_TYPE_CR2_QSFP: + SET_LMM(FIBRE); + SET_LMM(50000baseSR2_Full); break; case FW_PORT_TYPE_KR4_100G: @@ -674,13 +695,20 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings) { - const struct port_info *pi = netdev_priv(dev); + struct port_info *pi = netdev_priv(dev); struct ethtool_link_settings *base = &link_ksettings->base; ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + /* For the nonce, the Firmware doesn't send up Port State changes + * when the Virtual Interface attached to the Port is down. So + * if it's down, let's grab any changes. 
+ */ + if (!netif_running(dev)) + (void)t4_update_port_info(pi); + base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); if (pi->mdio_addr >= 0) { @@ -1085,14 +1113,31 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info) { + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - ts_info->phc_index = -1; + ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + if (adapter->ptp_clock) + ts_info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + ts_info->phc_index = -1; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 10736738ff30..45b5853ca2f1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -190,7 +190,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx) if (!skb) return -ENOMEM; - fwr = (struct fw_filter_wr *)__skb_put(skb, len); + fwr = __skb_put(skb, len); t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id); /* Mark the filter as "pending" and ship off the Filter Work Request. @@ -231,8 +231,7 @@ int set_filter_wr(struct adapter *adapter, int fidx) } } - fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); - memset(fwr, 0, sizeof(*fwr)); + fwr = __skb_put_zero(skb, sizeof(*fwr)); /* It would be nice to put most of the following in t4_hw.c but most * of the work is translating the cxgbtool ch_filter_specification diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 53309f659951..e403fa18f1b1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -79,6 +79,7 @@ #include "l2t.h" #include "sched.h" #include "cxgb4_tc_u32.h" +#include "cxgb4_ptp.h" char cxgb4_driver_name[] = KBUILD_MODNAME; @@ -824,9 +825,12 @@ static int setup_sge_queues(struct adapter *adap) { int err, i, j; struct sge *s = &adap->sge; - struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; + struct sge_uld_rxq_info *rxq_info = NULL; unsigned int cmplqid = 0; + if (is_uld(adap)) + rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; + for_each_port(adap, i) { struct net_device *dev = adap->port[i]; struct port_info *pi = netdev_priv(dev); @@ -840,8 +844,8 @@ static int setup_sge_queues(struct adapter *adap) adap->msi_idx, &q->fl, t4_ethrx_handler, NULL, - t4_get_mps_bg_map(adap, - pi->tx_chan)); + t4_get_tp_ch_map(adap, + pi->tx_chan)); if (err) goto freeout; q->rspq.idx = j; @@ -869,6 +873,14 @@ static int setup_sge_queues(struct adapter *adap) goto freeout; } + if (!is_t4(adap->params.chip)) { + err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], + netdev_get_tx_queue(adap->port[0], 0) + , s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + t4_write_reg(adap, is_t4(adap->params.chip) ? 
MPS_TRC_RSS_CONTROL_A : MPS_T5_TRC_RSS_CONTROL_A, @@ -891,7 +903,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, * The skb's priority is determined via the VLAN Tag Priority Code * Point field. */ - if (cxgb4_dcb_enabled(dev)) { + if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) { u16 vlan_tci; int err; @@ -1093,10 +1105,12 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) * This is equivalent to 4 TIDs. With CLIP enabled it * needs 2 TIDs. */ - if (family == PF_INET) - t->stids_in_use++; - else + if (family == PF_INET6) { t->stids_in_use += 2; + t->v6_stids_in_use += 2; + } else { + t->stids_in_use++; + } } spin_unlock_bh(&t->stid_lock); return stid; @@ -1150,13 +1164,16 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) bitmap_release_region(t->stid_bmap, stid, 1); t->stid_tab[stid].data = NULL; if (stid < t->nstids) { - if (family == PF_INET) - t->stids_in_use--; - else + if (family == PF_INET6) { t->stids_in_use -= 2; + t->v6_stids_in_use -= 2; + } else { + t->stids_in_use--; + } } else { t->sftids_in_use--; } + spin_unlock_bh(&t->stid_lock); } EXPORT_SYMBOL(cxgb4_free_stid); @@ -1170,7 +1187,7 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan, struct cpl_tid_release *req; set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); - req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + req = __skb_put(skb, sizeof(*req)); INIT_TP_WR(req, tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); } @@ -1232,7 +1249,8 @@ static void process_tid_release_list(struct work_struct *work) * Release a TID and inform HW. If we are unable to allocate the release * message we defer to a work queue. */ -void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid, + unsigned short family) { struct sk_buff *skb; struct adapter *adap = container_of(t, struct adapter, tids); @@ -1241,10 +1259,18 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) if (t->tid_tab[tid]) { t->tid_tab[tid] = NULL; - if (t->hash_base && (tid >= t->hash_base)) - atomic_dec(&t->hash_tids_in_use); - else - atomic_dec(&t->tids_in_use); + atomic_dec(&t->conns_in_use); + if (t->hash_base && (tid >= t->hash_base)) { + if (family == AF_INET6) + atomic_sub(2, &t->hash_tids_in_use); + else + atomic_dec(&t->hash_tids_in_use); + } else { + if (family == AF_INET6) + atomic_sub(2, &t->tids_in_use); + else + atomic_dec(&t->tids_in_use); + } } skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); @@ -1292,10 +1318,12 @@ static int tid_init(struct tid_info *t) spin_lock_init(&t->ftid_lock); t->stids_in_use = 0; + t->v6_stids_in_use = 0; t->sftids_in_use = 0; t->afree = NULL; t->atids_in_use = 0; atomic_set(&t->tids_in_use, 0); + atomic_set(&t->conns_in_use, 0); atomic_set(&t->hash_tids_in_use, 0); /* Setup the free list for atid_tab and clear the stid bitmap. 
*/ @@ -1343,7 +1371,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, return -ENOMEM; adap = netdev2adap(dev); - req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); + req = __skb_put(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); req->local_port = sport; @@ -1384,7 +1412,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, return -ENOMEM; adap = netdev2adap(dev); - req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); + req = __skb_put(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); req->local_port = sport; @@ -1416,7 +1444,7 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid, if (!skb) return -ENOMEM; - req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); + req = __skb_put(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) : @@ -2055,12 +2083,12 @@ static void detach_ulds(struct adapter *adap) mutex_lock(&uld_mutex); list_del(&adap->list_node); + for (i = 0; i < CXGB4_ULD_MAX; i++) - if (adap->uld && adap->uld[i].handle) { + if (adap->uld && adap->uld[i].handle) adap->uld[i].state_change(adap->uld[i].handle, CXGB4_STATE_DETACH); - adap->uld[i].handle = NULL; - } + if (netevent_registered && list_empty(&adapter_list)) { unregister_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = false; @@ -2251,6 +2279,13 @@ static int cxgb_open(struct net_device *dev) return err; } + /* It's possible that the basic port information could have + * changed since we first read it. + */ + err = t4_update_port_info(pi); + if (err < 0) + return err; + err = link_start(dev); if (!err) netif_tx_start_all_queues(dev); @@ -2412,6 +2447,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) unsigned int mbox; int ret = 0, prtad, devad; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; switch (cmd) { @@ -2449,18 +2485,69 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) sizeof(pi->tstamp_config))) return -EFAULT; - switch (pi->tstamp_config.rx_filter) { - case HWTSTAMP_FILTER_NONE: + if (!is_t4(adapter->params.chip)) { + switch (pi->tstamp_config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (pi->tstamp_config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + pi->rxtstamp = false; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + cxgb4_ptprx_timestamping(pi, pi->port_id, + PTP_TS_L4); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + cxgb4_ptprx_timestamping(pi, pi->port_id, + PTP_TS_L2_L4); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + pi->rxtstamp = true; + break; + default: + pi->tstamp_config.rx_filter = + HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && + (pi->tstamp_config.rx_filter == + HWTSTAMP_FILTER_NONE)) { + if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) + pi->ptp_enable = false; + } + + if (pi->tstamp_config.rx_filter != + HWTSTAMP_FILTER_NONE) { + if 
(cxgb4_ptp_redirect_rx_packet(adapter, + pi) >= 0) + pi->ptp_enable = true; + } + } else { + /* For T4 Adapters */ + switch (pi->tstamp_config.rx_filter) { + case HWTSTAMP_FILTER_NONE: pi->rxtstamp = false; break; - case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_ALL: pi->rxtstamp = true; break; - default: - pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + default: + pi->tstamp_config.rx_filter = + HWTSTAMP_FILTER_NONE; return -ERANGE; + } } - return copy_to_user(req->ifr_data, &pi->tstamp_config, sizeof(pi->tstamp_config)) ? -EFAULT : 0; @@ -2562,6 +2649,8 @@ static int cxgb_get_vf_config(struct net_device *dev, if (vf >= adap->num_vfs) return -EINVAL; ivi->vf = vf; + ivi->max_tx_rate = adap->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); return 0; } @@ -2578,6 +2667,109 @@ static int cxgb_get_phys_port_id(struct net_device *dev, return 0; } +static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct fw_port_cmd port_cmd, port_rpl; + u32 link_status, speed = 0; + u32 fw_pfvf, fw_class; + int class_id = vf; + int link_ok, ret; + u16 pktsize; + + if (vf >= adap->num_vfs) + return -EINVAL; + + if (min_tx_rate) { + dev_err(adap->pdev_dev, + "Min tx rate (%d) (> 0) for VF %d is Invalid.\n", + min_tx_rate, vf); + return -EINVAL; + } + /* Retrieve link details for VF port */ + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); + port_cmd.action_to_len16 = + cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(port_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), + &port_rpl); + if (ret != FW_SUCCESS) { + dev_err(adap->pdev_dev, + "Failed to get link status for VF %d\n", vf); + return -EINVAL; + } + link_status = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); + link_ok = (link_status & FW_PORT_CMD_LSTATUS_F) != 0; + if (!link_ok) { + dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); + return -EINVAL; + } + /* Determine link speed */ + if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + speed = 100; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + speed = 1000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + speed = 10000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + speed = 25000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + speed = 40000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + speed = 100000; + + if (max_tx_rate > speed) { + dev_err(adap->pdev_dev, + "Max tx rate %d for VF %d can't be > link-speed %u", + max_tx_rate, vf, speed); + return -EINVAL; + } + pktsize = be16_to_cpu(port_rpl.u.info.mtu); + /* subtract ethhdr size and 4 bytes crc since, f/w appends it */ + pktsize = pktsize - sizeof(struct ethhdr) - 4; + /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */ + pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); + /* configure Traffic Class for rate-limiting */ + ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET, + SCHED_CLASS_LEVEL_CL_RL, + SCHED_CLASS_MODE_CLASS, + SCHED_CLASS_RATEUNIT_BITS, + SCHED_CLASS_RATEMODE_ABS, + pi->port_id, class_id, 0, + max_tx_rate * 1000, 0, pktsize); + if (ret) { + dev_err(adap->pdev_dev, "Err %d for Traffic 
Class config\n", + ret); + return -EINVAL; + } + dev_info(adap->pdev_dev, + "Class %d with MSS %u configured with rate %u\n", + class_id, pktsize, max_tx_rate); + + /* bind VF to configured Traffic Class */ + fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); + fw_class = class_id; + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, + &fw_class); + if (ret) { + dev_err(adap->pdev_dev, + "Err %d in binding VF %d to Traffic Class %d\n", + ret, vf, class_id); + return -EINVAL; + } + dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", + adap->pf, vf, class_id); + adap->vfinfo[vf].tx_rate = max_tx_rate; + return 0; +} + #endif static int cxgb_set_mac_addr(struct net_device *dev, void *p) @@ -2697,12 +2889,15 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } -static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto, - struct tc_to_netdev *tc) +static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, + __be16 proto, struct tc_to_netdev *tc) { struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); + if (chain_index) + return -EOPNOTSUPP; + if (!(adap->flags & FULL_INIT_DONE)) { dev_err(adap->pdev_dev, "Failed to setup tc on port %d. Link Down?\n", @@ -2726,6 +2921,16 @@ static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return -EOPNOTSUPP; } +static netdev_features_t cxgb_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* Disable GRO, if RX_CSUM is disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_GRO; + + return features; +} + static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, @@ -2747,6 +2952,7 @@ static const struct net_device_ops cxgb4_netdev_ops = { #endif /* CONFIG_CHELSIO_T4_FCOE */ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, + .ndo_fix_features = cxgb_fix_features, }; #ifdef CONFIG_PCI_IOV @@ -2754,6 +2960,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = { .ndo_open = dummy_open, .ndo_set_vf_mac = cxgb_set_vf_mac, .ndo_get_vf_config = cxgb_get_vf_config, + .ndo_set_vf_rate = cxgb_set_vf_rate, .ndo_get_phys_port_id = cxgb_get_phys_port_id, }; #endif @@ -4018,10 +4225,7 @@ static void cfg_queues(struct adapter *adap) /* Reduce memory usage in kdump environment, disable all offload. */ - if (is_kdump_kernel()) { - adap->params.offload = 0; - adap->params.crypto = 0; - } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) { + if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { adap->params.offload = 0; adap->params.crypto = 0; } @@ -4042,7 +4246,7 @@ static void cfg_queues(struct adapter *adap) struct port_info *pi = adap2pinfo(adap, i); pi->first_qset = qidx; - pi->nqsets = 8; + pi->nqsets = is_kdump_kernel() ? 
1 : 8; qidx += pi->nqsets; } #else /* !CONFIG_CHELSIO_T4_DCB */ @@ -4055,6 +4259,9 @@ static void cfg_queues(struct adapter *adap) if (q10g > netif_get_num_default_rss_queues()) q10g = netif_get_num_default_rss_queues(); + if (is_kdump_kernel()) + q10g = 1; + for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); @@ -4094,6 +4301,9 @@ static void cfg_queues(struct adapter *adap) for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) s->ctrlq[i].q.size = 512; + if (!is_t4(adap->params.chip)) + s->ptptxq.q.size = 8; + init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); init_rspq(adap, &s->intrq, 0, 1, 512, 64); } @@ -4960,6 +5170,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); + netif_carrier_off(adapter->port[i]); + err = register_netdev(adapter->port[i]); if (err) break; @@ -4990,6 +5202,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_unlock(&uld_mutex); } + if (!is_t4(adapter->params.chip)) + cxgb4_ptp_init(adapter); + print_adapter_info(adapter); setup_fw_sge_queues(adapter); return 0; @@ -5026,13 +5241,15 @@ sriov: &v, &port_vec); if (err < 0) { dev_err(adapter->pdev_dev, "Could not fetch port params\n"); - goto free_adapter; + goto free_mbox_log; } adapter->params.nports = hweight32(port_vec); pci_set_drvdata(pdev, adapter); return 0; +free_mbox_log: + kfree(adapter->mbox_log); free_adapter: kfree(adapter); free_pci_region: @@ -5086,8 +5303,10 @@ static void remove_one(struct pci_dev *pdev) */ destroy_workqueue(adapter->workq); - if (is_uld(adapter)) + if (is_uld(adapter)) { detach_ulds(adapter); + t4_uld_clean_up(adapter); + } disable_interrupts(adapter); @@ -5097,6 +5316,9 @@ static void remove_one(struct pci_dev *pdev) debugfs_remove_recursive(adapter->debugfs_root); + if (!is_t4(adapter->params.chip)) + cxgb4_ptp_stop(adapter); + /* If we allocated filters, free up state associated with any * valid filters ... */ @@ -5132,6 +5354,7 @@ static void remove_one(struct pci_dev *pdev) unregister_netdev(adapter->port[0]); iounmap(adapter->regs); kfree(adapter->vfinfo); + kfree(adapter->mbox_log); kfree(adapter); pci_disable_sriov(pdev); pci_release_regions(pdev); @@ -5164,7 +5387,11 @@ static void shutdown_one(struct pci_dev *pdev) if (adapter->port[i]->reg_state == NETREG_REGISTERED) cxgb_close(adapter->port[i]); - t4_uld_clean_up(adapter); + if (is_uld(adapter)) { + detach_ulds(adapter); + t4_uld_clean_up(adapter); + } + disable_interrupts(adapter); disable_msi(adapter); @@ -5178,6 +5405,7 @@ static void shutdown_one(struct pci_dev *pdev) unregister_netdev(adapter->port[0]); iounmap(adapter->regs); kfree(adapter->vfinfo); + kfree(adapter->mbox_log); kfree(adapter); pci_disable_sriov(pdev); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c new file mode 100644 index 000000000000..50517cfd9671 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -0,0 +1,475 @@ +/* + * cxgb4_ptp.c:Chelsio PTP support for T5/T6 + * + * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Written by: Atul Gupta (atul.gupta@chelsio.com) + */ + +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/pps_kernel.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/ptp_classify.h> +#include <linux/udp.h> + +#include "cxgb4.h" +#include "t4_hw.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "cxgb4_ptp.h" + +/** + * cxgb4_ptp_is_ptp_tx - determine whether TX packet is PTP or not + * @skb: skb of outgoing ptp request + * + */ +bool cxgb4_ptp_is_ptp_tx(struct sk_buff *skb) +{ + struct udphdr *uh; + + uh = udp_hdr(skb); + return skb->len >= PTP_MIN_LENGTH && + skb->len <= PTP_IN_TRANSMIT_PACKET_MAXNUM && + likely(skb->protocol == htons(ETH_P_IP)) && + ip_hdr(skb)->protocol == IPPROTO_UDP && + uh->dest == htons(PTP_EVENT_PORT); +} + +bool is_ptp_enabled(struct sk_buff *skb, struct net_device *dev) +{ + struct port_info *pi; + + pi = netdev_priv(dev); + return (pi->ptp_enable && cxgb4_xmit_with_hwtstamp(skb) && + cxgb4_ptp_is_ptp_tx(skb)); +} + +/** + * cxgb4_ptp_is_ptp_rx - determine whether RX packet is PTP or not + * @skb: skb of incoming ptp request + * + */ +bool cxgb4_ptp_is_ptp_rx(struct sk_buff *skb) +{ + struct udphdr *uh = (struct udphdr *)(skb->data + ETH_HLEN + + IPV4_HLEN(skb->data)); + + return uh->dest == htons(PTP_EVENT_PORT) && + uh->source == htons(PTP_EVENT_PORT); +} + +/** + * cxgb4_ptp_read_hwstamp - read timestamp for TX event PTP message + * @adapter: board private structure + * @pi: port private structure + * + */ +void cxgb4_ptp_read_hwstamp(struct adapter *adapter, struct port_info *pi) +{ + struct skb_shared_hwtstamps *skb_ts = NULL; + u64 tx_ts; + + skb_ts = skb_hwtstamps(adapter->ptp_tx_skb); + + tx_ts = t4_read_reg(adapter, + T5_PORT_REG(pi->port_id, MAC_PORT_TX_TS_VAL_LO)); + + tx_ts |= (u64)t4_read_reg(adapter, + T5_PORT_REG(pi->port_id, + MAC_PORT_TX_TS_VAL_HI)) << 32; + skb_ts->hwtstamp = ns_to_ktime(tx_ts); + skb_tstamp_tx(adapter->ptp_tx_skb, skb_ts); + dev_kfree_skb_any(adapter->ptp_tx_skb); + spin_lock(&adapter->ptp_lock); + adapter->ptp_tx_skb = NULL; + spin_unlock(&adapter->ptp_lock); +} + +/** + * cxgb4_ptprx_timestamping - Enable Timestamp for RX PTP event message + * @pi: port private structure + * @port: 
pot number + * @mode: RX mode + * + */ +int cxgb4_ptprx_timestamping(struct port_info *pi, u8 port, u16 mode) +{ + struct adapter *adapter = pi->adapter; + struct fw_ptp_cmd c; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(port)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.init.sc = FW_PTP_SC_RXTIME_STAMP; + c.u.init.mode = cpu_to_be16(mode); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + return err; +} + +int cxgb4_ptp_txtype(struct adapter *adapter, u8 port) +{ + struct fw_ptp_cmd c; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(port)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.init.sc = FW_PTP_SC_TX_TYPE; + c.u.init.mode = cpu_to_be16(PTP_TS_NONE); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + + return err; +} + +int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi) +{ + struct sge *s = &adapter->sge; + struct sge_eth_rxq *receive_q = &s->ethrxq[pi->first_qset]; + struct fw_ptp_cmd c; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(pi->port_id)); + + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.init.sc = FW_PTP_SC_RDRX_TYPE; + c.u.init.txchan = pi->tx_chan; + c.u.init.absid = cpu_to_be16(receive_q->rspq.abs_id); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + return err; +} + +/** + * @ptp: ptp clock structure + * @ppb: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated ppb from + * the base frequency. + */ +static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct adapter *adapter = (struct adapter *)container_of(ptp, + struct adapter, ptp_clock_info); + struct fw_ptp_cmd c; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.ts.sc = FW_PTP_SC_ADJ_FREQ; + c.u.ts.sign = (ppb < 0) ? 1 : 0; + if (ppb < 0) + ppb = -ppb; + c.u.ts.ppb = cpu_to_be32(ppb); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + + return err; +} + +/** + * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. 
+ */ +static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta) +{ + struct fw_ptp_cmd c; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; + c.u.ts.tm = cpu_to_be64(delta); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + return err; +} + +/** + * cxgb4_ptp_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. + */ +static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct adapter *adapter = + (struct adapter *)container_of(ptp, struct adapter, + ptp_clock_info); + struct fw_ptp_cmd c; + s64 sign = 1; + int err; + + if (delta < 0) + sign = -1; + + if (delta * sign > PTP_CLOCK_MAX_ADJTIME) { + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.ts.sc = FW_PTP_SC_ADJ_TIME; + c.u.ts.sign = (delta < 0) ? 1 : 0; + if (delta < 0) + delta = -delta; + c.u.ts.tm = cpu_to_be64(delta); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + } else { + err = cxgb4_ptp_fineadjtime(adapter, delta); + } + + return err; +} + +/** + * cxgb4_ptp_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. + */ +static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct adapter *adapter = (struct adapter *)container_of(ptp, + struct adapter, ptp_clock_info); + struct fw_ptp_cmd c; + u64 ns; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.ts.sc = FW_PTP_SC_GET_TIME; + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c); + if (err < 0) { + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + return err; + } + + /* convert to timespec*/ + ns = be64_to_cpu(c.u.ts.tm); + *ts = ns_to_timespec64(ns); + + return err; +} + +/** + * cxgb4_ptp_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset value to new base value instead of the kernel + * wall timer value. 
+ */ +static int cxgb4_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct adapter *adapter = (struct adapter *)container_of(ptp, + struct adapter, ptp_clock_info); + struct fw_ptp_cmd c; + u64 ns; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.ts.sc = FW_PTP_SC_SET_TIME; + + ns = timespec64_to_ns(ts); + c.u.ts.tm = cpu_to_be64(ns); + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); + + return err; +} + +static void cxgb4_init_ptp_timer(struct adapter *adapter) +{ + struct fw_ptp_cmd c; + int err; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_PTP_CMD_PORTID_V(0)); + c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); + c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; + + err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); + if (err < 0) + dev_err(adapter->pdev_dev, + "PTP: %s error %d\n", __func__, -err); +} + +/** + * cxgb4_ptp_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + */ +static int cxgb4_ptp_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -ENOTSUPP; +} + +static const struct ptp_clock_info cxgb4_ptp_clock_info = { + .owner = THIS_MODULE, + .name = "cxgb4_clock", + .max_adj = MAX_PTP_FREQ_ADJ, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjfreq = cxgb4_ptp_adjfreq, + .adjtime = cxgb4_ptp_adjtime, + .gettime64 = cxgb4_ptp_gettime, + .settime64 = cxgb4_ptp_settime, + .enable = cxgb4_ptp_enable, +}; + +/** + * cxgb4_ptp_init - initialize PTP for devices which support it + * @adapter: board private structure + * + * This function performs the required steps for enabling PTP support. + */ +void cxgb4_ptp_init(struct adapter *adapter) +{ + struct timespec64 now; + /* no need to create a clock device if we already have one */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return; + + adapter->ptp_tx_skb = NULL; + adapter->ptp_clock_info = cxgb4_ptp_clock_info; + spin_lock_init(&adapter->ptp_lock); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, + &adapter->pdev->dev); + if (!adapter->ptp_clock) { + dev_err(adapter->pdev_dev, + "PTP %s Clock registration has failed\n", __func__); + return; + } + + now = ktime_to_timespec64(ktime_get_real()); + cxgb4_init_ptp_timer(adapter); + if (cxgb4_ptp_settime(&adapter->ptp_clock_info, &now) < 0) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + } +} + +/** + * cxgb4_ptp_remove - disable PTP device and stop the overflow check + * @adapter: board private structure + * + * Stop the PTP support. 
+ */ +void cxgb4_ptp_stop(struct adapter *adapter) +{ + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + } + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + } +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h new file mode 100644 index 000000000000..cccfae84bb84 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.h @@ -0,0 +1,74 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_PTP_H__ +#define __CXGB4_PTP_H__ + +/* Maximum parts-per-billion adjustment that is acceptable */ +#define MAX_PTP_FREQ_ADJ 1000000 +#define PTP_CLOCK_MAX_ADJTIME 10000000 /* 10 ms */ + +#define PTP_MIN_LENGTH 63 +#define PTP_IN_TRANSMIT_PACKET_MAXNUM 240 +#define PTP_EVENT_PORT 319 + +enum ptp_rx_filter_mode { + PTP_TS_NONE = 0, + PTP_TS_L2, + PTP_TS_L4, + PTP_TS_L2_L4 +}; + +struct port_info; + +static inline bool cxgb4_xmit_with_hwtstamp(struct sk_buff *skb) +{ + return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; +} + +static inline void cxgb4_xmit_hwtstamp_pending(struct sk_buff *skb) +{ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +} + +void cxgb4_ptp_init(struct adapter *adap); +void cxgb4_ptp_stop(struct adapter *adap); +bool cxgb4_ptp_is_ptp_tx(struct sk_buff *skb); +bool cxgb4_ptp_is_ptp_rx(struct sk_buff *skb); +int cxgb4_ptprx_timestamping(struct port_info *pi, u8 port, u16 mode); +int cxgb4_ptp_redirect_rx_packet(struct adapter *adap, struct port_info *pi); +int cxgb4_ptp_txtype(struct adapter *adap, u8 port_id); +void cxgb4_ptp_read_hwstamp(struct adapter *adap, struct port_info *pi); +bool is_ptp_enabled(struct sk_buff *skb, struct net_device *dev); +#endif /* __CXGB4_PTP_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index d0868c2320da..71a315bc1409 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -589,22 +589,37 @@ void t4_uld_mem_free(struct adapter *adap) kfree(adap->uld); } +/* This function should be called with uld_mutex taken. */ +static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type) +{ + if (adap->uld[type].handle) { + adap->uld[type].handle = NULL; + adap->uld[type].add = NULL; + release_sge_txq_uld(adap, type); + + if (adap->flags & FULL_INIT_DONE) + quiesce_rx_uld(adap, type); + + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); + + free_sge_queues_uld(adap, type); + free_queues_uld(adap, type); + } +} + void t4_uld_clean_up(struct adapter *adap) { unsigned int i; - if (!adap->uld) - return; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) continue; - if (adap->flags & FULL_INIT_DONE) - quiesce_rx_uld(adap, i); - if (adap->flags & USING_MSIX) - free_msix_queue_irqs_uld(adap, i); - free_sge_queues_uld(adap, i); - free_queues_uld(adap, i); + + cxgb4_shutdown_uld_adapter(adap, i); } + mutex_unlock(&uld_mutex); } static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) @@ -642,6 +657,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->sge_ingpadboundary = adap->sge.fl_align; lld->sge_egrstatuspagesize = adap->sge.stat_len; lld->sge_pktshift = adap->sge.pktshift; + lld->ulp_crypto = adap->params.crypto; lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; lld->max_ordird_qp = adap->params.max_ordird_qp; lld->max_ird_adapter = adap->params.max_ird_adapter; @@ -782,15 +798,8 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) continue; if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) continue; - adap->uld[type].handle = NULL; - adap->uld[type].add = NULL; - release_sge_txq_uld(adap, type); - if (adap->flags & FULL_INIT_DONE) - quiesce_rx_uld(adap, type); - if (adap->flags & USING_MSIX) - free_msix_queue_irqs_uld(adap, type); - free_sge_queues_uld(adap, type); - free_queues_uld(adap, type); + + cxgb4_shutdown_uld_adapter(adap, type); } mutex_unlock(&uld_mutex); diff 
--git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6e74040af49a..84541fce94c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -123,12 +123,14 @@ struct tid_info {
 	spinlock_t stid_lock;
 	unsigned int stids_in_use;
+	unsigned int v6_stids_in_use;
 	unsigned int sftids_in_use;

 	/* TIDs in the TCAM */
 	atomic_t tids_in_use;
 	/* TIDs in the HASH */
 	atomic_t hash_tids_in_use;
+	atomic_t conns_in_use;
 	/* lock for setting/clearing filter bitmap */
 	spinlock_t ftid_lock;
 };
@@ -157,13 +159,21 @@ static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
 }

 static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
-				    unsigned int tid)
+				    unsigned int tid, unsigned short family)
 {
 	t->tid_tab[tid] = data;
-	if (t->hash_base && (tid >= t->hash_base))
-		atomic_inc(&t->hash_tids_in_use);
-	else
-		atomic_inc(&t->tids_in_use);
+	if (t->hash_base && (tid >= t->hash_base)) {
+		if (family == AF_INET6)
+			atomic_add(2, &t->hash_tids_in_use);
+		else
+			atomic_inc(&t->hash_tids_in_use);
+	} else {
+		if (family == AF_INET6)
+			atomic_add(2, &t->tids_in_use);
+		else
+			atomic_inc(&t->tids_in_use);
+	}
+	atomic_inc(&t->conns_in_use);
 }

 int cxgb4_alloc_atid(struct tid_info *t, void *data);
@@ -171,8 +181,8 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
 void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
-void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
-
+void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+		      unsigned short family);
 struct in6_addr;

 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
@@ -275,6 +285,15 @@ struct cxgb4_virt_res {			/* virtualized HW resources */
 	unsigned int ncrypto_fc;
 };

+struct chcr_stats_debug {
+	atomic_t cipher_rqst;
+	atomic_t digest_rqst;
+	atomic_t aead_rqst;
+	atomic_t complete;
+	atomic_t error;
+	atomic_t fallback;
+};
+
 #define OCQ_WIN_OFFSET(pdev, vres) \
 	(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))

@@ -322,6 +341,7 @@ struct cxgb4_lld_info {
 	unsigned int iscsi_tagmask;          /* iscsi ddp tag mask */
 	unsigned int iscsi_pgsz_order;       /* iscsi ddp page size orders */
 	unsigned int iscsi_llimit;           /* chip's iscsi region llimit */
+	unsigned int ulp_crypto;             /* crypto lookaside support */
 	void **iscsi_ppm;                    /* iscsi page pod manager */
 	int nodeid;                          /* device numa node id */
 	bool fr_nsmr_tpte_wr_support;        /* FW supports FR_NSMR_TPTE_WR */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6f3692db29af..f7ef8871dd0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -146,7 +146,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 	if (!skb)
 		return -ENOMEM;

-	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+	req = __skb_put(skb, sizeof(*req));
 	INIT_TP_WR(req, 0);

 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f05f0d400324..ede12209f20b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -52,6 +52,7 @@
 #include "t4_values.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
+#include "cxgb4_ptp.h"

 /*
  * Rx buffer size. We use largish buffers if possible but settle for single
@@ -1162,7 +1163,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	u32 wr_mid, ctrl0;
+	u32 wr_mid, ctrl0, op;
 	u64 cntrl, *end;
 	int qidx, credits;
 	unsigned int flits, ndesc;
@@ -1175,6 +1176,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 	bool immediate = false;
 	int len, max_pkt_len;
+	bool ptp_enabled = is_ptp_enabled(skb, dev);
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	int err;
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1198,15 +1200,31 @@ out_free:	dev_kfree_skb_any(skb);
 	pi = netdev_priv(dev);
 	adap = pi->adapter;
 	qidx = skb_get_queue_mapping(skb);
-	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+	if (ptp_enabled) {
+		spin_lock(&adap->ptp_lock);
+		if (!(adap->ptp_tx_skb)) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			adap->ptp_tx_skb = skb_get(skb);
+		} else {
+			spin_unlock(&adap->ptp_lock);
+			goto out_free;
+		}
+		q = &adap->sge.ptptxq;
+	} else {
+		q = &adap->sge.ethtxq[qidx + pi->first_qset];
+	}
+	skb_tx_timestamp(skb);

 	reclaim_completed_tx(adap, &q->q, true);
 	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

 #ifdef CONFIG_CHELSIO_T4_FCOE
 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
-	if (unlikely(err == -ENOTSUPP))
+	if (unlikely(err == -ENOTSUPP)) {
+		if (ptp_enabled)
+			spin_unlock(&adap->ptp_lock);
 		goto out_free;
+	}
 #endif /* CONFIG_CHELSIO_T4_FCOE */

 	flits = calc_tx_flits(skb);
@@ -1218,6 +1236,8 @@ out_free:	dev_kfree_skb_any(skb);
 		dev_err(adap->pdev_dev,
 			"%s: Tx ring %u full while queue awake!\n",
 			dev->name, qidx);
+		if (ptp_enabled)
+			spin_unlock(&adap->ptp_lock);
 		return NETDEV_TX_BUSY;
 	}

@@ -1227,6 +1247,8 @@ out_free:	dev_kfree_skb_any(skb);
 	if (!immediate &&
 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
 		q->mapping_err++;
+		if (ptp_enabled)
+			spin_unlock(&adap->ptp_lock);
 		goto out_free;
 	}

@@ -1279,7 +1301,11 @@ out_free:	dev_kfree_skb_any(skb);
 		q->tx_cso += ssi->gso_segs;
 	} else {
 		len += sizeof(*cpl);
-		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+		if (ptp_enabled)
+			op = FW_PTP_TX_PKT_WR;
+		else
+			op = FW_ETH_TX_PKT_WR;
+		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
 				       FW_WR_IMMDLEN_V(len));
 		cpl = (void *)(wr + 1);
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1301,6 +1327,8 @@ out_free:	dev_kfree_skb_any(skb);
 	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
 		TXPKT_PF_V(adap->pf);
+	if (ptp_enabled)
+		ctrl0 |= TXPKT_TSTAMP_F;
 #ifdef CONFIG_CHELSIO_T4_DCB
 	if (is_t4(adap->params.chip))
 		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
@@ -1332,6 +1360,8 @@ out_free:	dev_kfree_skb_any(skb);
 	txq_advance(&q->q, ndesc);

 	ring_tx_db(adap, &q->q, ndesc);
+	if (ptp_enabled)
+		spin_unlock(&adap->ptp_lock);
 	return NETDEV_TX_OK;
 }
@@ -2023,6 +2053,92 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 	rxq->stats.rx_cso++;
 }

+enum {
+	RX_NON_PTP_PKT = 0,
+	RX_PTP_PKT_SUC = 1,
+	RX_PTP_PKT_ERR = 2
+};
+
+/**
+ * t4_systim_to_hwstamp - read hardware time stamp
+ * @adapter: the adapter
+ * @skb: the packet
+ *
+ * Read the timestamp from the MPS packet and insert it into the skb,
+ * which is then forwarded to the PTP application.
+ */
+static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
+					 struct sk_buff *skb)
+{
+	struct skb_shared_hwtstamps *hwtstamps;
+	struct cpl_rx_mps_pkt *cpl = NULL;
+	unsigned char *data;
+	int offset;
+
+	cpl = (struct cpl_rx_mps_pkt *)skb->data;
+	if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
+	      X_CPL_RX_MPS_PKT_TYPE_PTP))
+		return RX_PTP_PKT_ERR;
+
+	data = skb->data + sizeof(*cpl);
+	skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
+	offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
+	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
+		return RX_PTP_PKT_ERR;
+
+	hwtstamps = skb_hwtstamps(skb);
+	memset(hwtstamps, 0, sizeof(*hwtstamps));
+	hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+
+	return RX_PTP_PKT_SUC;
+}
+
+/**
+ * t4_rx_hststamp - Recv PTP Event Message
+ * @adapter: the adapter
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @rxq: the ethernet Rx queue for this packet
+ * @skb: the packet
+ *
+ * If PTP is enabled and this is an MPS packet, read the HW timestamp.
+ */
+static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
+			  struct sge_eth_rxq *rxq, struct sk_buff *skb)
+{
+	int ret;
+
+	if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
+		     !is_t4(adapter->params.chip))) {
+		ret = t4_systim_to_hwstamp(adapter, skb);
+		if (ret == RX_PTP_PKT_ERR) {
+			kfree_skb(skb);
+			rxq->stats.rx_drops++;
+		}
+		return ret;
+	}
+	return RX_NON_PTP_PKT;
+}
+
+/**
+ * t4_tx_hststamp - Loopback PTP Transmit Event Message
+ * @adapter: the adapter
+ * @skb: the packet
+ * @dev: the ingress net device
+ *
+ * Read the hardware timestamp for the loopback PTP Tx event message.
+ */
+static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
+			  struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
+		cxgb4_ptp_read_hwstamp(adapter, pi);
+		kfree_skb(skb);
+		return 0;
+	}
+	return 1;
+}
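The transmit side keeps at most one PTP packet in flight: t4_eth_xmit() takes an extra reference with skb_get(), parks it in adap->ptp_tx_skb, and marks the work request with FW_PTP_TX_PKT_WR/TXPKT_TSTAMP_F so the MAC captures the transmit time, which t4_tx_hststamp() above then asks cxgb4_ptp_read_hwstamp() to collect. A minimal sketch of what such a completion path generally does is shown below; the helper name and the way the raw nanosecond value is obtained are assumptions for illustration only (the driver's real code lives in cxgb4_ptp.c, which is not part of this hunk).

/* Hypothetical completion helper: hand the MAC's Tx timestamp for the single
 * outstanding PTP packet to the stack and drop the reference taken with
 * skb_get() in t4_eth_xmit().
 */
static void example_ptp_tx_complete(struct adapter *adap, u64 ns)
{
	struct skb_shared_hwtstamps hwtstamps;
	struct sk_buff *skb;

	spin_lock(&adap->ptp_lock);
	skb = adap->ptp_tx_skb;		/* set in t4_eth_xmit() */
	adap->ptp_tx_skb = NULL;	/* allow the next PTP transmit */
	spin_unlock(&adap->ptp_lock);

	if (!skb)
		return;

	memset(&hwtstamps, 0, sizeof(hwtstamps));
	hwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &hwtstamps);	/* queues SCM_TIMESTAMPING for the socket */
	dev_kfree_skb_any(skb);		/* pairs with the skb_get() reference */
}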
+
 /**
  * t4_ethrx_handler - process an ingress ethernet packet
  * @q: the response queue that received the packet
@@ -2038,11 +2154,13 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	struct sk_buff *skb;
 	const struct cpl_rx_pkt *pkt;
 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = q->adap;
 	struct sge *s = &q->adap->sge;
 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
 	u16 err_vec;
 	struct port_info *pi;
+	int ret = 0;

 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
 		return handle_trace_pkt(q->adap, si);
@@ -2068,8 +2186,25 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 		rxq->stats.rx_drops++;
 		return 0;
 	}
+	pi = netdev_priv(q->netdev);
+
+	/* Handle PTP Event Rx packet */
+	if (unlikely(pi->ptp_enable)) {
+		ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
+		if (ret == RX_PTP_PKT_ERR)
+			return 0;
+	}
+	if (likely(!ret))
+		__skb_pull(skb, s->pktshift); /* remove ethernet header pad */
+
+	/* Handle the PTP Event Tx Loopback packet */
+	if (unlikely(pi->ptp_enable && !ret &&
+		     (pkt->l2info & htonl(RXF_UDP_F)) &&
+		     cxgb4_ptp_is_ptp_rx(skb))) {
+		if (!t4_tx_hststamp(adapter, skb, q->netdev))
+			return 0;
+	}
-	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
 	skb->protocol = eth_type_trans(skb, q->netdev);
 	skb_record_rx_queue(skb, q->idx);
 	if (skb->dev->features & NETIF_F_RXHASH)
@@ -2078,7 +2213,6 @@
 	rxq->stats.pkts++;

-	pi = netdev_priv(skb->dev);
 	if (pi->rxtstamp)
 		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
 					 si->sgetstamp);
@@ -2502,6 +2636,20 @@ static void sge_tx_timer_cb(unsigned long data)
 			tasklet_schedule(&txq->qresume_tsk);
 	}

+	if (!is_t4(adap->params.chip)) {
+		struct sge_eth_txq *q = &s->ptptxq;
+		int avail;
+
+		spin_lock(&adap->ptp_lock);
+		avail = reclaimable(&q->q);
+
+		if (avail) {
+			free_tx_desc(adap, &q->q, avail, false);
+			q->q.in_use -= avail;
+		}
+		spin_unlock(&adap->ptp_lock);
+	}
+
 	budget = MAX_TIMER_TX_RECLAIM;
 	i = s->ethtxq_rover;
 	do {
@@ -3068,6 +3216,19 @@ void t4_free_sge_resources(struct adapter *adap)
 	if (adap->sge.intrq.desc)
 		free_rspq_fl(adap, &adap->sge.intrq, NULL);

+	if (!is_t4(adap->params.chip)) {
+		etq = &adap->sge.ptptxq;
+		if (etq->q.desc) {
+			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
+				       etq->q.cntxt_id);
+			spin_lock_bh(&adap->ptp_lock);
+			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+			spin_unlock_bh(&adap->ptp_lock);
+			kfree(etq->q.sdesc);
+			free_txq(adap, &etq->q);
+		}
+	}
+
 	/* clear the reverse egress queue map */
 	memset(adap->sge.egr_map, 0,
 	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
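For context, the Rx and Tx timestamps that the sge.c plumbing above attaches are what user space sees through SO_TIMESTAMPING. The fragment below is ordinary socket API usage rather than code from this patch; it assumes the interface has already been put into one of the PTP_TS_* filter modes via the SIOCSHWTSTAMP ioctl (ptp4l normally does this).

#include <linux/net_tstamp.h>
#include <sys/socket.h>

/* Request raw hardware Rx/Tx timestamps on a PTP event socket. */
static int enable_hw_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}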
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3a34aa629f7d..82bf7aac6cdb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3540,7 +3540,7 @@ int t4_load_phy_fw(struct adapter *adap,
 		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
 	val = phy_fw_size;
 	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
-				 &param, &val, 1);
+				 &param, &val, 1, true);
 	if (ret < 0)
 		return ret;
 	mtype = val >> 8;
@@ -4040,6 +4040,7 @@ static void cim_intr_handler(struct adapter *adapter)
 		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
 		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
 		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
+		{ TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
 		{ 0 }
 	};
 	static const struct intr_info cim_upintr_info[] = {
@@ -4074,11 +4075,27 @@ static void cim_intr_handler(struct adapter *adapter)
 		{ 0 }
 	};

+	u32 val, fw_err;
 	int fat;

-	if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
+	fw_err = t4_read_reg(adapter, PCIE_FW_A);
+	if (fw_err & PCIE_FW_ERR_F)
 		t4_report_fw_error(adapter);

+	/* When the Firmware detects an internal error which normally
+	 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
+	 * in order to make sure the Host sees the Firmware Crash.  So
+	 * if we have a Timer0 interrupt and don't see a Firmware Crash,
+	 * ignore the Timer0 interrupt.
+	 */
+
+	val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
+	if (val & TIMER0INT_F)
+		if (!(fw_err & PCIE_FW_ERR_F) ||
+		    (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
+			t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
+				     TIMER0INT_F);
+
 	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
 				    cim_intr_info) +
 	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
@@ -4445,7 +4462,7 @@ static void pl_intr_handler(struct adapter *adap)
 #define PF_INTR_MASK (PFSW_F)
 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
 		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
-		CPL_SWITCH_F | SGE_F | ULP_TX_F)
+		CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)

 /**
  * t4_slow_intr_handler - control path interrupt handler
@@ -5423,30 +5440,155 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 }

 /**
- * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
  * @adap: the adapter
- * @idx: the port index
+ * @pidx: the port index
+ *
+ * Computes and returns a bitmap indicating which MPS buffer groups are
+ * associated with the given Port.  Bit i is set if buffer group i is
+ * used by the Port.
+ */
+static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
+					      int pidx)
+{
+	unsigned int chip_version, nports;
+
+	chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+	case CHELSIO_T5:
+		switch (nports) {
+		case 1: return 0xf;
+		case 2: return 3 << (2 * pidx);
+		case 4: return 1 << pidx;
+		}
+		break;
+
+	case CHELSIO_T6:
+		switch (nports) {
+		case 2: return 1 << (2 * pidx);
+		}
+		break;
+	}
+
+	dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
+		chip_version, nports);
+
+	return 0;
+}
+
+/**
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
  *
  * Returns a bitmap indicating which MPS buffer groups are associated
- * with the given port.  Bit i is set if buffer group i is used by the
- * port.
+ * with the given Port.  Bit i is set if buffer group i is used by the
+ * Port.
  */
-unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
 {
-	u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
+	u8 *mps_bg_map;
+	unsigned int nports;
+
+	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
+	if (pidx >= nports) {
+		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
+			pidx, nports);
+		return 0;
+	}
+
+	/* If we've already retrieved/computed this, just return the result.
+	 */
+	mps_bg_map = adapter->params.mps_bg_map;
+	if (mps_bg_map[pidx])
+		return mps_bg_map[pidx];
+
+	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
+	 * If we're talking to such Firmware, let it tell us.  If the new
+	 * API isn't supported, fall back to the old hardcoded way.  The value
+	 * obtained from the Firmware is encoded in the following format:
+	 *
+	 *	val = (( MPSBGMAP[Port 3] << 24 ) |
+	 *	       ( MPSBGMAP[Port 2] << 16 ) |
+	 *	       ( MPSBGMAP[Port 1] << 8 ) |
+	 *	       ( MPSBGMAP[Port 0] << 0 ))
+	 */
+	if (adapter->flags & FW_OK) {
+		u32 param, val;
+		int ret;
+
+		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
+		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
+					 0, 1, &param, &val);
+		if (!ret) {
+			int p;
+
+			/* Store the BG Map for all of the Ports in order to
+			 * avoid more calls to the Firmware in the future.
+			 */
+			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
+				mps_bg_map[p] = val & 0xff;
+
+			return mps_bg_map[pidx];
+		}
+	}

-	if (n == 0)
-		return idx == 0 ? 0xf : 0;
-	/* In T6 (which is a 2 port card),
-	 * port 0 is mapped to channel 0 and port 1 is mapped to channel 1.
-	 * For 2 port T4/T5 adapter,
-	 * port 0 is mapped to channel 0 and 1,
-	 * port 1 is mapped to channel 2 and 3.
+	/* Either we're not talking to the Firmware or we're dealing with
+	 * older Firmware which doesn't support the new API to get the MPS
+	 * Buffer Group Map.  Fall back to computing it ourselves.
 	 */
-	if ((n == 1) &&
-	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
-		return idx < 2 ? (3 << (2 * idx)) : 0;
-	return 1 << idx;
+	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
+	return mps_bg_map[pidx];
+}
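To make the byte-per-port encoding above concrete, here is a worked decode; the value is invented for illustration and matches what compute_mps_bg_map() would return for a 2-port T4/T5 adapter.

/* Example: the firmware reports val = 0x00000c03 on a 2-port T4/T5 adapter.
 * The byte-at-a-time loop above then stores
 *
 *	mps_bg_map[0] = 0x03;	(port 0 uses buffer groups 0 and 1)
 *	mps_bg_map[1] = 0x0c;	(port 1 uses buffer groups 2 and 3)
 *	mps_bg_map[2] = 0x00;
 *	mps_bg_map[3] = 0x00;
 *
 * which agrees with compute_mps_bg_map()'s 3 << (2 * pidx) for nports == 2.
 */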
+
+/**
+ * t4_get_tp_ch_map - return TP ingress channels associated with a port
+ * @adap: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which TP Ingress Channels are associated
+ * with a given Port.  Bit i is set if TP Ingress Channel i is used by
+ * the Port.
+ */
+unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
+{
+	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+	unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
+
+	if (pidx >= nports) {
+		dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
+			 pidx, nports);
+		return 0;
+	}
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+	case CHELSIO_T5:
+		/* Note that this happens to be the same values as the MPS
+		 * Buffer Group Map for these Chips.  But we replicate the code
+		 * here because they're really separate concepts.
+		 */
+		switch (nports) {
+		case 1: return 0xf;
+		case 2: return 3 << (2 * pidx);
+		case 4: return 1 << pidx;
+		}
+		break;

+	case CHELSIO_T6:
+		switch (nports) {
+		case 2: return 1 << pidx;
+		}
+		break;
+	}
+
+	dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
+		chip_version, nports);
+	return 0;
 }

 /**
@@ -6293,13 +6435,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 	if (!t4_fw_matches_chip(adap, fw_hdr))
 		return -EINVAL;

+	/* Disable FW_OK flag so that mbox commands with FW_OK flag set
+	 * won't be sent when we are flashing FW.
+	 */
+	adap->flags &= ~FW_OK;
+
 	ret = t4_fw_halt(adap, mbox, force);
 	if (ret < 0 && !force)
-		return ret;
+		goto out;

 	ret = t4_load_fw(adap, fw_data, size);
 	if (ret < 0)
-		return ret;
+		goto out;

 	/*
 	 * Older versions of the firmware don't understand the new
@@ -6310,7 +6457,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 	 * its header flags to see if it advertises the capability.
 	 */
 	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
-	return t4_fw_restart(adap, mbox, reset);
+	ret = t4_fw_restart(adap, mbox, reset);
+
+	/* Grab potentially new Firmware Device Log parameters so we can see
+	 * how healthy the new Firmware is.
It's okay to contact the new + * Firmware for these parameters even though, as far as it's + * concerned, we've never said "HELLO" to it ... + */ + (void)t4_init_devlog_params(adap); +out: + adap->flags |= FW_OK; + return ret; } /** @@ -6546,13 +6703,14 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox) * @params: the parameter names * @val: the parameter values * @rw: Write and read flag + * @sleep_ok: if true, we may sleep awaiting mbox cmd completion * * Reads the value of FW or device parameters. Up to 7 parameters can be * queried at once. */ int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, - u32 *val, int rw) + u32 *val, int rw, bool sleep_ok) { int i, ret; struct fw_params_cmd c; @@ -6575,7 +6733,7 @@ int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, p++; } - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); if (ret == 0) for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) *val++ = be32_to_cpu(*p); @@ -6586,7 +6744,16 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val) { - return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0); + return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0, + true); +} + +int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0, + false); } /** @@ -7360,11 +7527,41 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->fc = fc; lc->supported = be16_to_cpu(p->u.info.pcap); lc->lp_advertising = be16_to_cpu(p->u.info.lpacap); + t4_os_link_changed(adap, pi->port_id, link_ok); } } /** + * t4_update_port_info - retrieve and update port information if changed + * @pi: the port_info + * + * We issue a Get Port Information Command to the Firmware and, if + * successful, we check to see if anything is different from what we + * last recorded and update things accordingly. + */ +int t4_update_port_info(struct port_info *pi) +{ + struct fw_port_cmd port_cmd; + int ret; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); + port_cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(port_cmd)); + ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, + &port_cmd, sizeof(port_cmd), &port_cmd); + if (ret) + return ret; + + t4_handle_get_port_info(pi, (__be64 *)&port_cmd); + return 0; +} + +/** * t4_handle_fw_rpl - process a FW reply message * @adap: the adapter * @rpl: start of the FW message @@ -7643,10 +7840,9 @@ int t4_shutdown_adapter(struct adapter *adapter) t4_intr_disable(adapter); t4_write_reg(adapter, DBG_GPIO_EN_A, 0); for_each_port(adapter, port) { - u32 a_port_cfg = PORT_REG(port, - is_t4(adapter->params.chip) - ? XGMAC_PORT_CFG_A - : MAC_PORT_CFG_A); + u32 a_port_cfg = is_t4(adapter->params.chip) ? 
+ PORT_REG(port, XGMAC_PORT_CFG_A) : + T5_PORT_REG(port, MAC_PORT_CFG_A); t4_write_reg(adapter, a_port_cfg, t4_read_reg(adapter, a_port_cfg) @@ -8272,7 +8468,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); if (ret) break; - idx = (idx + 1) & UPDBGLARDPTR_M; + + /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to + * identify the 32-bit portion of the full 312-bit data + */ + if (is_t6(adap->params.chip) && (idx & 0xf) >= 9) + idx = (idx & 0xff0) + 0x10; + else + idx++; + /* address can't exceed 0xfff */ + idx &= UPDBGLARDPTR_M; } restart: if (cfg & UPDBGLAEN_F) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 8098c93cd16e..b0ff78da8aa2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -92,6 +92,7 @@ enum { CPL_RDMA_TERMINATE = 0xA2, CPL_RDMA_WRITE = 0xA4, CPL_SGE_EGR_UPDATE = 0xA5, + CPL_RX_MPS_PKT = 0xAF, CPL_TRACE_PKT = 0xB0, CPL_ISCSI_DATA = 0xB2, @@ -807,6 +808,10 @@ struct cpl_tx_pkt { #define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S) #define TXPKT_INS_OVLAN_F TXPKT_INS_OVLAN_V(1U) +#define TXPKT_TSTAMP_S 23 +#define TXPKT_TSTAMP_V(x) ((x) << TXPKT_TSTAMP_S) +#define TXPKT_TSTAMP_F TXPKT_TSTAMP_V(1ULL) + #define TXPKT_OPCODE_S 24 #define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S) @@ -1875,4 +1880,27 @@ struct cpl_rx_phys_dsgl { (((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \ CPL_RX_PHYS_DSGL_NOOFSGENTR_M) +struct cpl_rx_mps_pkt { + __be32 op_to_r1_hi; + __be32 r1_lo_length; +}; + +#define CPL_RX_MPS_PKT_OP_S 24 +#define CPL_RX_MPS_PKT_OP_M 0xff +#define CPL_RX_MPS_PKT_OP_V(x) ((x) << CPL_RX_MPS_PKT_OP_S) +#define CPL_RX_MPS_PKT_OP_G(x) \ + (((x) >> CPL_RX_MPS_PKT_OP_S) & CPL_RX_MPS_PKT_OP_M) + +#define CPL_RX_MPS_PKT_TYPE_S 20 +#define CPL_RX_MPS_PKT_TYPE_M 0xf +#define CPL_RX_MPS_PKT_TYPE_V(x) ((x) << CPL_RX_MPS_PKT_TYPE_S) +#define CPL_RX_MPS_PKT_TYPE_G(x) \ + (((x) >> CPL_RX_MPS_PKT_TYPE_S) & CPL_RX_MPS_PKT_TYPE_M) + +enum { + X_CPL_RX_MPS_PKT_TYPE_PAUSE = 1 << 0, + X_CPL_RX_MPS_PKT_TYPE_PPP = 1 << 1, + X_CPL_RX_MPS_PKT_TYPE_QFC = 1 << 2, + X_CPL_RX_MPS_PKT_TYPE_PTP = 1 << 3 +}; #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index a323185507ec..99987d8e437e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -172,6 +172,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */ CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */ /* T6 adapters: */ @@ -190,6 +192,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6015), CH_PCI_ID_TABLE_FENTRY(0x6080), CH_PCI_ID_TABLE_FENTRY(0x6081), + CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR SFP28 */ + CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR QSFP28 */ + CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 3348d33c36fa..dac90837842b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1077,6 
+1077,10 @@ #define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S) #define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U) +#define TIMER0INT_S 2 +#define TIMER0INT_V(x) ((x) << TIMER0INT_S) +#define TIMER0INT_F TIMER0INT_V(1U) + #define PREFDROPINT_S 1 #define PREFDROPINT_V(x) ((x) << PREFDROPINT_S) #define PREFDROPINT_F PREFDROPINT_V(1U) @@ -1795,6 +1799,8 @@ #define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 #define MAC_PORT_MAGIC_MACID_LO 0x824 #define MAC_PORT_MAGIC_MACID_HI 0x828 +#define MAC_PORT_TX_TS_VAL_LO 0x928 +#define MAC_PORT_TX_TS_VAL_HI 0x92c #define MAC_PORT_EPIO_DATA0_A 0x8c0 #define MAC_PORT_EPIO_DATA1_A 0x8c4 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 251a35e9795c..0ebed64d62d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -103,6 +103,7 @@ enum fw_wr_opcodes { FW_RI_FR_NSMR_TPTE_WR = 0x20, FW_RI_INV_LSTAG_WR = 0x1a, FW_ISCSI_TX_DATA_WR = 0x45, + FW_PTP_TX_PKT_WR = 0x46, FW_CRYPTO_LOOKASIDE_WR = 0X6d, FW_LASTC2E_WR = 0x70 }; @@ -685,6 +686,7 @@ enum fw_cmd_opcodes { FW_SCHED_CMD = 0x24, FW_DEVLOG_CMD = 0x25, FW_CLIP_CMD = 0x28, + FW_PTP_CMD = 0x3e, FW_LASTC2E_CMD = 0x40, FW_ERROR_CMD = 0x80, FW_DEBUG_CMD = 0x81, @@ -1123,6 +1125,7 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, FW_PARAMS_PARAM_DEV_FWCACHE = 0x18, FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, + FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, }; /* @@ -2572,6 +2575,7 @@ enum fw_port_type { FW_PORT_TYPE_CR_QSFP, FW_PORT_TYPE_CR2_QSFP, FW_PORT_TYPE_SFP28, + FW_PORT_TYPE_KR_SFP28, FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; @@ -2800,6 +2804,54 @@ struct fw_port_lb_stats_cmd { } u; }; +enum fw_ptp_subop { + /* none */ + FW_PTP_SC_INIT_TIMER = 0x00, + FW_PTP_SC_TX_TYPE = 0x01, + /* init */ + FW_PTP_SC_RXTIME_STAMP = 0x08, + FW_PTP_SC_RDRX_TYPE = 0x09, + /* ts */ + FW_PTP_SC_ADJ_FREQ = 0x10, + FW_PTP_SC_ADJ_TIME = 0x11, + FW_PTP_SC_ADJ_FTIME = 0x12, + FW_PTP_SC_WALL_CLOCK = 0x13, + FW_PTP_SC_GET_TIME = 0x14, + FW_PTP_SC_SET_TIME = 0x15, +}; + +struct fw_ptp_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_ptp { + struct fw_ptp_sc { + __u8 sc; + __u8 r3[7]; + } scmd; + struct fw_ptp_init { + __u8 sc; + __u8 txchan; + __be16 absid; + __be16 mode; + __be16 r3; + } init; + struct fw_ptp_ts { + __u8 sc; + __u8 sign; + __be16 r3; + __be32 ppb; + __be64 tm; + } ts; + } u; + __be64 r3; +}; + +#define FW_PTP_CMD_PORTID_S 0 +#define FW_PTP_CMD_PORTID_M 0xf +#define FW_PTP_CMD_PORTID_V(x) ((x) << FW_PTP_CMD_PORTID_S) +#define FW_PTP_CMD_PORTID_G(x) \ + (((x) >> FW_PTP_CMD_PORTID_S) & FW_PTP_CMD_PORTID_M) + struct fw_rss_ind_tbl_cmd { __be32 op_to_viid; __be32 retval_len16; @@ -3087,6 +3139,10 @@ struct fw_debug_cmd { #define FW_DEBUG_CMD_TYPE_G(x) \ (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M) +enum pcie_fw_eval { + PCIE_FW_EVAL_CRASH = 0, +}; + #define PCIE_FW_ERR_S 31 #define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S) #define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U) diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h index 515b94ff9080..4b5aacc09cab 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h @@ -90,7 +90,7 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan) { struct cpl_tid_release *req; - req = (struct cpl_tid_release *)__skb_put(skb, len); + req = __skb_put(skb, len); memset(req, 0, len); INIT_TP_WR(req, tid); 
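The cxgb_mk_*() helpers in libcxgb_cm.h only format a CPL message into a caller-supplied skb; allocating the skb and handing it to the adapter is still up to the ULP. A hypothetical caller might look like the sketch below (the function name is made up for illustration; cxgb4_ofld_send() and the lld_info ports array are the existing cxgb4 interfaces a ULP would normally use).

static int example_release_tid(struct cxgb4_lld_info *lldi, u32 tid, u16 chan)
{
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Fills in the CPL header and selects the Tx queue for @chan. */
	cxgb_mk_tid_release(skb, sizeof(struct cpl_tid_release), tid, chan);

	return cxgb4_ofld_send(lldi->ports[0], skb);
}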
@@ -104,7 +104,7 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_close_con_req *req; - req = (struct cpl_close_con_req *)__skb_put(skb, len); + req = __skb_put(skb, len); memset(req, 0, len); INIT_TP_WR(req, tid); @@ -119,7 +119,7 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_abort_req *req; - req = (struct cpl_abort_req *)__skb_put(skb, len); + req = __skb_put(skb, len); memset(req, 0, len); INIT_TP_WR(req, tid); @@ -134,7 +134,7 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan) { struct cpl_abort_rpl *rpl; - rpl = (struct cpl_abort_rpl *)__skb_put(skb, len); + rpl = __skb_put(skb, len); memset(rpl, 0, len); INIT_TP_WR(rpl, tid); @@ -149,7 +149,7 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_rx_data_ack *req; - req = (struct cpl_rx_data_ack *)__skb_put(skb, len); + req = __skb_put(skb, len); memset(req, 0, len); INIT_TP_WR(req, tid); |