Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c      26
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c              6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h           16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c     230
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h        1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c            172
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c          152
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h           1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h          9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h         4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c  43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c            6
12 files changed, 487 insertions, 179 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 45d77334d7d9..07bbb711b7e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int vectors;
- int i, err;
+ int i;
vectors = ARRAY_SIZE(entries);
for (i = 0; i < vectors; ++i)
entries[i].entry = i;
- while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
- vectors = err;
-
- if (err < 0)
- pci_disable_msix(adap->pdev);
-
- if (!err && vectors < (adap->params.nports + 1)) {
- pci_disable_msix(adap->pdev);
- err = -1;
- }
+ vectors = pci_enable_msix_range(adap->pdev, entries,
+ adap->params.nports + 1, vectors);
+ if (vectors < 0)
+ return vectors;
- if (!err) {
- for (i = 0; i < vectors; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- adap->msix_nvectors = vectors;
- }
+ for (i = 0; i < vectors; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ adap->msix_nvectors = vectors;
- return err;
+ return 0;
}
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
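The hunk above replaces the driver's open-coded retry loop around the old pci_enable_msix() with pci_enable_msix_range(), which takes an explicit [minvec, maxvec] window and returns either the number of vectors actually granted or a negative errno (cleaning up after itself on failure). A minimal sketch of the calling convention follows; my_probe() and NUM_QUEUES are hypothetical, the API itself is the real kernel one:

    #include <linux/kernel.h>
    #include <linux/pci.h>

    #define NUM_QUEUES 8	/* hypothetical queue count */

    static int my_probe(struct pci_dev *pdev)
    {
    	struct msix_entry entries[NUM_QUEUES + 1];
    	int i, nvec;

    	for (i = 0; i < ARRAY_SIZE(entries); i++)
    		entries[i].entry = i;

    	/* Ask for up to NUM_QUEUES + 1 vectors but accept as few as 2.
    	 * Unlike pci_enable_msix(), this never returns a positive
    	 * "retry with this many" hint: the result is either >= minvec
    	 * granted vectors or a negative errno with MSI-X left disabled.
    	 */
    	nvec = pci_enable_msix_range(pdev, entries, 2, ARRAY_SIZE(entries));
    	if (nvec < 0)
    		return nvec;

    	/* entries[0..nvec-1].vector now hold the allocated IRQ numbers. */
    	return 0;
    }

The same conversion is applied to cxgb4's enable_msix() and cxgb4vf's enable_msix() further down.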
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 632b318eb38a..8b069f96e920 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->eop) {
- kfree_skb(d->skb);
+ dev_consume_skb_any(d->skb);
d->skb = NULL;
}
}
@@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
wr_gen2(d, gen);
- kfree_skb(skb);
+ dev_consume_skb_any(skb);
return;
}
@@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
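The sge.c hunks above (and their cxgb4/cxgb4vf counterparts below) switch packet-free calls on the transmit path to the context-safe *_any() variants, and distinguish successful transmit completions (dev_consume_skb_any(), which skips the skb:kfree_skb drop tracepoint) from genuine error drops (dev_kfree_skb_any()). A short sketch of the convention; complete_tx_skb() and the tx_ok flag are illustrative, not driver code:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Both *_any() variants are safe from hard-IRQ or process context,
     * which matters because TX reclaim can run from either.
     */
    static void complete_tx_skb(struct sk_buff *skb, bool tx_ok)
    {
    	if (tx_ok)
    		dev_consume_skb_any(skb);  /* delivered: not a drop event */
    	else
    		dev_kfree_skb_any(skb);    /* error path: counts as a drop */
    }

With this split, packet-drop tracing tools no longer report every successfully transmitted skb as a drop.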
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1f4b9b30b9ed..32db37709263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -66,6 +66,7 @@ enum {
SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
};
enum {
@@ -254,6 +255,7 @@ struct vpd_params {
u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
+ u8 pn[PN_LEN + 1];
};
struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
unsigned char bypass;
unsigned int ofldq_wr_cred;
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
#include "t4fw_api.h"
@@ -497,6 +500,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
+ unsigned short db_pidx_inc;
u64 udb;
};
@@ -553,8 +557,13 @@ struct sge {
u32 pktshift; /* padding between CPL & packet data */
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
- unsigned int starve_thres;
- u8 idma_state[2];
+
+ /* State variables for detecting an SGE Ingress DMA hang */
+ unsigned int idma_1s_thresh; /* SGE same State Counter 1s threshold */
+ unsigned int idma_stalled[2]; /* SGE synthesized stalled timers in HZ */
+ unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
+ unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
+
unsigned int egr_start;
unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
@@ -957,7 +966,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
-
+const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
@@ -1029,4 +1038,5 @@ void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 34e2488767d9..6fe58913403a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -254,6 +254,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5011, 4),
CH_DEVICE(0x5012, 4),
CH_DEVICE(0x5013, 4),
+ CH_DEVICE(0x5014, 4),
+ CH_DEVICE(0x5015, 4),
+ CH_DEVICE(0x5080, 4),
+ CH_DEVICE(0x5081, 4),
+ CH_DEVICE(0x5082, 4),
+ CH_DEVICE(0x5083, 4),
+ CH_DEVICE(0x5084, 4),
+ CH_DEVICE(0x5085, 4),
CH_DEVICE(0x5401, 4),
CH_DEVICE(0x5402, 4),
CH_DEVICE(0x5403, 4),
@@ -273,6 +281,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5411, 4),
CH_DEVICE(0x5412, 4),
CH_DEVICE(0x5413, 4),
+ CH_DEVICE(0x5414, 4),
+ CH_DEVICE(0x5415, 4),
+ CH_DEVICE(0x5480, 4),
+ CH_DEVICE(0x5481, 4),
+ CH_DEVICE(0x5482, 4),
+ CH_DEVICE(0x5483, 4),
+ CH_DEVICE(0x5484, 4),
+ CH_DEVICE(0x5485, 4),
{ 0, }
};
@@ -423,15 +439,18 @@ static void link_report(struct net_device *dev)
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
- case SPEED_10000:
+ case 10000:
s = "10Gbps";
break;
- case SPEED_1000:
+ case 1000:
s = "1000Mbps";
break;
- case SPEED_100:
+ case 100:
s = "100Mbps";
break;
+ case 40000:
+ s = "40Gbps";
+ break;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2080,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x40200, 0x40298,
0x402ac, 0x4033c,
0x403f8, 0x403fc,
- 0x41300, 0x413c4,
+ 0x41304, 0x413c4,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x44078,
@@ -2089,7 +2108,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x48200, 0x48298,
0x482ac, 0x4833c,
0x483f8, 0x483fc,
- 0x49300, 0x493c4,
+ 0x49304, 0x493c4,
0x49400, 0x4941c,
0x49480, 0x494d0,
0x4c000, 0x4c078,
@@ -2199,6 +2218,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
+ else if (type == FW_PORT_TYPE_BP40_BA)
+ v |= SUPPORTED_40000baseSR4_Full;
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
@@ -2215,6 +2236,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
v |= FW_PORT_CAP_SPEED_1G;
if (caps & ADVERTISED_10000baseT_Full)
v |= FW_PORT_CAP_SPEED_10G;
+ if (caps & ADVERTISED_40000baseSR4_Full)
+ v |= FW_PORT_CAP_SPEED_40G;
return v;
}
@@ -2263,12 +2286,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static unsigned int speed_to_caps(int speed)
{
- if (speed == SPEED_100)
+ if (speed == 100)
return FW_PORT_CAP_SPEED_100M;
- if (speed == SPEED_1000)
+ if (speed == 1000)
return FW_PORT_CAP_SPEED_1G;
- if (speed == SPEED_10000)
+ if (speed == 10000)
return FW_PORT_CAP_SPEED_10G;
+ if (speed == 40000)
+ return FW_PORT_CAP_SPEED_40G;
return 0;
}
@@ -2296,8 +2321,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) || (speed == SPEED_1000) ||
- (speed == SPEED_10000))
+ if (!(lc->supported & cap) ||
+ (speed == 1000) ||
+ (speed == 10000) ||
+ (speed == 40000))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -3205,8 +3232,8 @@ static int cxgb4_clip_get(const struct net_device *dev,
c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
- *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
- *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ c.ip_hi = *(__be64 *)(lip->s6_addr);
+ c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
@@ -3221,8 +3248,8 @@ static int cxgb4_clip_release(const struct net_device *dev,
c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
- *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
- *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ c.ip_hi = *(__be64 *)(lip->s6_addr);
+ c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
@@ -3563,14 +3590,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
static void disable_txq_db(struct sge_txq *q)
{
- spin_lock_irq(&q->db_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->db_lock, flags);
q->db_disabled = 1;
- spin_unlock_irq(&q->db_lock);
+ spin_unlock_irqrestore(&q->db_lock, flags);
}
-static void enable_txq_db(struct sge_txq *q)
+static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
spin_lock_irq(&q->db_lock);
+ if (q->db_pidx_inc) {
+ /* Make sure that all writes to the TX descriptors
+ * are committed before we tell HW about them.
+ */
+ wmb();
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+ q->db_pidx_inc = 0;
+ }
q->db_disabled = 0;
spin_unlock_irq(&q->db_lock);
}
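While doorbells are disabled for FIFO recovery, ring_tx_db() (see the cxgb4 sge.c hunks below) now accumulates the pending producer-index delta in the new db_pidx_inc field instead of writing SGE_PF_KDOORBELL; enable_txq_db() above flushes that delta under the same lock before clearing db_disabled, so no PIDX update is lost across the disabled window. A condensed sketch of the two halves of the pattern, with hw_ring_doorbell() as a hypothetical stand-in for the real register write:

    #include <linux/spinlock.h>

    struct txq {
    	spinlock_t lock;
    	int db_disabled;
    	unsigned short db_pidx_inc;	/* PIDX delta deferred while disabled */
    };

    static void hw_ring_doorbell(struct txq *q, int n)
    {
    	/* stand-in for the SGE_PF_KDOORBELL register write */
    }

    static void ring_db(struct txq *q, int n)	/* hot path */
    {
    	unsigned long flags;

    	spin_lock_irqsave(&q->lock, flags);
    	if (q->db_disabled)
    		q->db_pidx_inc += n;		/* defer: remember the delta */
    	else
    		hw_ring_doorbell(q, n);
    	spin_unlock_irqrestore(&q->lock, flags);
    }

    static void enable_db(struct txq *q)	/* recovery path */
    {
    	spin_lock_irq(&q->lock);
    	if (q->db_pidx_inc) {
    		hw_ring_doorbell(q, q->db_pidx_inc);	/* flush deferred delta */
    		q->db_pidx_inc = 0;
    	}
    	q->db_disabled = 0;
    	spin_unlock_irq(&q->lock);
    }

Note also that disable_txq_db() switches to spin_lock_irqsave() and ring_tx_db() now takes the lock with interrupts disabled, consistent with these paths being callable from contexts where interrupts may already be off.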
@@ -3592,11 +3630,32 @@ static void enable_dbs(struct adapter *adap)
int i;
for_each_ethrxq(&adap->sge, i)
- enable_txq_db(&adap->sge.ethtxq[i].q);
+ enable_txq_db(adap, &adap->sge.ethtxq[i].q);
for_each_ofldrxq(&adap->sge, i)
- enable_txq_db(&adap->sge.ofldtxq[i].q);
+ enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
- enable_txq_db(&adap->sge.ctrlq[i].q);
+ enable_txq_db(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+ if (adap->uld_handle[CXGB4_ULD_RDMA])
+ ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+ cmd);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+ struct adapter *adap;
+
+ adap = container_of(work, struct adapter, db_full_task);
+
+ drain_db_fifo(adap, dbfifo_drain_delay);
+ enable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT,
+ DBFIFO_HP_INT | DBFIFO_LP_INT);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3604,7 +3663,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
u16 hw_pidx, hw_cidx;
int ret;
- spin_lock_bh(&q->db_lock);
+ spin_lock_irq(&q->db_lock);
ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
if (ret)
goto out;
@@ -3621,7 +3680,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
}
out:
q->db_disabled = 0;
- spin_unlock_bh(&q->db_lock);
+ q->db_pidx_inc = 0;
+ spin_unlock_irq(&q->db_lock);
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
@@ -3637,29 +3697,6 @@ static void recover_all_queues(struct adapter *adap)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
-static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
-{
- mutex_lock(&uld_mutex);
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
- mutex_unlock(&uld_mutex);
-}
-
-static void process_db_full(struct work_struct *work)
-{
- struct adapter *adap;
-
- adap = container_of(work, struct adapter, db_full_task);
-
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
- drain_db_fifo(adap, dbfifo_drain_delay);
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT,
- DBFIFO_HP_INT | DBFIFO_LP_INT);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-}
-
static void process_db_drop(struct work_struct *work)
{
struct adapter *adap;
@@ -3667,11 +3704,13 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
if (is_t4(adap->params.chip)) {
- disable_dbs(adap);
+ drain_db_fifo(adap, dbfifo_drain_delay);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, 1);
+ drain_db_fifo(adap, dbfifo_drain_delay);
recover_all_queues(adap);
+ drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
} else {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
@@ -3712,6 +3751,8 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
if (is_t4(adap->params.chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
@@ -3720,8 +3761,11 @@ void t4_db_full(struct adapter *adap)
void t4_db_dropped(struct adapter *adap)
{
- if (is_t4(adap->params.chip))
- queue_work(workq, &adap->db_drop_task);
+ if (is_t4(adap->params.chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+ }
+ queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
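Taken together with process_db_full() and process_db_drop() above, the reworked recovery flow moves the "stop everything" half into the interrupt-level hooks and leaves only the slow drain/re-enable work to the workqueue. A condensed outline, paraphrasing the hunks rather than quoting literal driver code:

    /*
     *  interrupt context                workqueue context
     *  -----------------                -----------------
     *  t4_db_full():                    process_db_full():
     *    disable_dbs()                    drain_db_fifo()
     *    notify ULD: DB_FULL              enable_dbs()  (flushes db_pidx_inc)
     *    mask DBFIFO interrupts           notify ULD: DB_EMPTY
     *    queue db_full_task               unmask DBFIFO interrupts
     *
     *  t4_db_dropped() on T4:           process_db_drop() on T4:
     *    disable_dbs()                    drain_db_fifo()
     *    notify ULD: DB_FULL              notify ULD: DB_DROP, drain again
     *    queue db_drop_task               recover_all_queues() (re-sync PIDX)
     *                                     drain, enable_dbs()
     *                                     notify ULD: DB_EMPTY
     */

Because notify_rdma_uld() is now reachable from interrupt context, it also drops the uld_mutex it used to take (see the removed hunk above); a sleeping lock would not be permitted there.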
@@ -3765,6 +3809,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_pktshift = adap->sge.pktshift;
lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -5370,6 +5415,21 @@ static int adap_init0(struct adapter *adap)
(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
/*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+ * permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
@@ -5603,9 +5663,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
-static inline bool is_10g_port(const struct link_config *lc)
+static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5690,7 @@ static void cfg_queues(struct adapter *adap)
int i, q10g = 0, n10g = 0, qidx = 0;
for_each_port(adap, i)
- n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5705,7 @@ static void cfg_queues(struct adapter *adap)
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
@@ -5737,7 +5798,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
- int i, err, want, need;
+ int i, want, need;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5814,30 @@ static int enable_msix(struct adapter *adap)
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
- while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
- want = err;
+ want = pci_enable_msix_range(adap->pdev, entries, need, want);
+ if (want < 0)
+ return want;
- if (!err) {
- /*
- * Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = want - EXTRA_VECS - ofld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
- }
- if (is_offload(adap)) {
- i = want - EXTRA_VECS - s->max_ethqsets;
- i -= ofld_need - nchan;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
- }
- for (i = 0; i < want; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- } else if (err > 0)
- dev_info(adap->pdev_dev,
- "only %d MSI-X vectors left, not using MSI-X\n", err);
- return err;
+ /*
+ * Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
#undef EXTRA_VECS
@@ -5801,11 +5860,6 @@ static int init_rss(struct adapter *adap)
static void print_port_info(const struct net_device *dev)
{
- static const char *base[] = {
- "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
- "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
- };
-
char buf[80];
char *bufp = buf;
const char *spd = "";
@@ -5823,9 +5877,11 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
if (bufp != buf)
--bufp;
- sprintf(bufp, "BASE-%s", base[pi->port_type]);
+ sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
@@ -5833,8 +5889,8 @@ static void print_port_info(const struct net_device *dev)
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, E/C: %s\n",
- adap->params.vpd.sn, adap->params.vpd.ec);
+ netdev_info(dev, "S/N: %s, P/N: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4dd0a82533e4..e274a047528f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
/* packet data */
bool enable_fw_ofld_conn; /* Enable connection through fw */
/* WR */
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 47ffa64fcf19..ca95cf2954eb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -93,6 +93,16 @@
*/
#define TX_QCHECK_PERIOD (HZ / 2)
+/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
+ * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
+ * State Machines in the same state for this amount of time (in HZ) then we'll
+ * issue a warning about a potential hang. We'll repeat the warning as the
+ * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
+ * the situation clears. If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH (1 * HZ)
+#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
+
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
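The SGE_IDMA_WARN_* thresholds above drive a stall timer that sge_rx_timer_cb() (further down) synthesizes in the HZ domain: the first tick past the hardware's one-second same-state count seeds idma_stalled[] with HZ, every subsequent callback adds RX_QCHECK_PERIOD, and a warning fires whenever (idma_stalled - HZ) is a multiple of SGE_IDMA_WARN_REPEAT. A stand-alone sketch of that arithmetic (plain user-space C; HZ and the callback period are reduced to illustrative constants):

    #include <stdio.h>

    #define HZ                   1000
    #define RX_QCHECK_PERIOD     (HZ / 2)	/* illustrative callback period */
    #define SGE_IDMA_WARN_THRESH (1 * HZ)
    #define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)

    int main(void)
    {
    	unsigned int stalled = 0;
    	int tick;

    	/* Pretend the channel stays stuck for 30 seconds of callbacks. */
    	for (tick = 0; tick < 60; tick++) {
    		stalled = stalled ? stalled + RX_QCHECK_PERIOD : HZ;

    		if (stalled < SGE_IDMA_WARN_THRESH)
    			continue;
    		if (((stalled - HZ) % SGE_IDMA_WARN_REPEAT) == 0)
    			printf("warn: stalled for %u sec\n", stalled / HZ);
    	}
    	return 0;	/* prints warnings at 1s, 11s, 21s, ... */
    }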
@@ -373,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (unmap)
unmap_sgl(dev, d->skb, d->sgl, q);
- kfree_skb(d->skb);
+ dev_consume_skb_any(d->skb);
d->skb = NULL;
}
++d;
@@ -706,11 +716,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
* @skb: the packet
*
* Returns whether an Ethernet packet is small enough to fit as
- * immediate data.
+ * immediate data. Return value corresponds to headroom required.
*/
static inline int is_eth_imm(const struct sk_buff *skb)
{
- return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+ int hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
}
/**
@@ -723,9 +739,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
unsigned int flits;
+ int hdrlen = is_eth_imm(skb);
- if (is_eth_imm(skb))
- return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
if (skb_shinfo(skb)->gso_size)
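is_eth_imm() now returns the CPL headroom a packet needs if it qualifies for immediate data (0 otherwise), so calc_tx_flits() can reuse that value instead of recomputing it, and GSO packets may qualify too once the LSO core CPL is accounted for. Spelled out with the quantities from the hunks above:

    /*
     * hdrlen = sizeof(struct cpl_tx_pkt)                      non-GSO skb
     * hdrlen = sizeof(struct cpl_tx_pkt_lso_core)
     *        + sizeof(struct cpl_tx_pkt)                      GSO skb
     *
     * The packet is sent as immediate data iff
     *     skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen
     * and then the whole work request occupies
     *     DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64))
     * flits (8-byte units), which is what calc_tx_flits() returns.
     */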
@@ -843,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
unsigned int *wr, index;
+ unsigned long flags;
wmb(); /* write descriptors before telling HW */
- spin_lock(&q->db_lock);
+ spin_lock_irqsave(&q->db_lock, flags);
if (!q->db_disabled) {
if (is_t4(adap->params.chip)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
@@ -861,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
writel(n, adap->bar2 + q->udb + 8);
wmb();
}
- }
+ } else
+ q->db_pidx_inc += n;
q->db_pidx = q->pidx;
- spin_unlock(&q->db_lock);
+ spin_unlock_irqrestore(&q->db_lock, flags);
}
/**
@@ -971,6 +990,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -982,13 +1002,14 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
/*
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb(skb);
+out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1011,7 +1032,10 @@ out_free: dev_kfree_skb(skb);
return NETDEV_TX_BUSY;
}
- if (!is_eth_imm(skb) &&
+ if (is_eth_imm(skb))
+ immediate = true;
+
+ if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
goto out_free;
@@ -1028,6 +1052,7 @@ out_free: dev_kfree_skb(skb);
wr->r3 = cpu_to_be64(0);
end = (u64 *)wr + flits;
+ len = immediate ? skb->len : 0;
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1060,9 @@ out_free: dev_kfree_skb(skb);
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(sizeof(*lso)));
+ FW_WR_IMMDLEN(len));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
@@ -1054,9 +1080,7 @@ out_free: dev_kfree_skb(skb);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
- int len;
-
- len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ len += sizeof(*cpl);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(len));
cpl = (void *)(wr + 1);
@@ -1078,9 +1102,9 @@ out_free: dev_kfree_skb(skb);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
- if (is_eth_imm(skb)) {
+ if (immediate) {
inline_tx_skb(skb, &q->q, cpl + 1);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
} else {
int last_desc;
@@ -1467,8 +1491,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
unsigned int idx = skb_txq(skb);
- if (unlikely(is_ctrl_pkt(skb)))
+ if (unlikely(is_ctrl_pkt(skb))) {
+ /* Single ctrl queue is a requirement for LE workaround path */
+ if (adap->tids.nsftids)
+ idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ }
return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}
@@ -1992,7 +2020,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
- unsigned int i, cnt[2];
+ unsigned int i, idma_same_state_cnt[2];
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
@@ -2015,21 +2043,64 @@ static void sge_rx_timer_cb(unsigned long data)
}
t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
- cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
- cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
-
- for (i = 0; i < 2; i++)
- if (cnt[i] >= s->starve_thres) {
- if (s->idma_state[i] || cnt[i] == 0xffffffff)
- continue;
- s->idma_state[i] = 1;
- t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
- m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
- dev_warn(adap->pdev_dev,
- "SGE idma%u starvation detected for "
- "queue %lu\n", i, m & 0xffff);
- } else if (s->idma_state[i])
- s->idma_state[i] = 0;
+ idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+ idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+ for (i = 0; i < 2; i++) {
+ u32 debug0, debug11;
+
+ /* If the Ingress DMA Same State Counter ("timer") is less
+ * than 1s, then we can reset our synthesized Stall Timer and
+ * continue. If we have previously emitted warnings about a
+ * potential stalled Ingress Queue, issue a note indicating
+ * that the Ingress Queue has resumed forward progress.
+ */
+ if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
+ if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
+ CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
+ i, s->idma_qid[i],
+ s->idma_stalled[i]/HZ);
+ s->idma_stalled[i] = 0;
+ continue;
+ }
+
+ /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+ * domain. The first time we get here it'll be because we
+ * passed the 1s Threshold; each additional time it'll be
+ * because the RX Timer Callback is being fired on its regular
+ * schedule.
+ *
+ * If the stall is below our Potential Hung Ingress Queue
+ * Warning Threshold, continue.
+ */
+ if (s->idma_stalled[i] == 0)
+ s->idma_stalled[i] = HZ;
+ else
+ s->idma_stalled[i] += RX_QCHECK_PERIOD;
+
+ if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
+ continue;
+
+ /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
+ if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
+ continue;
+
+ /* Read and save the SGE IDMA State and Queue ID information.
+ * We do this every time in case it changes across time ...
+ */
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
+ debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+ debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+ CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
+ i, s->idma_qid[i], s->idma_state[i],
+ s->idma_stalled[i]/HZ, debug0, debug11);
+ t4_sge_decode_idma_state(adap, s->idma_state[i]);
+ }
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
@@ -2580,11 +2651,19 @@ static int t4_sge_init_soft(struct adapter *adap)
fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
#undef READ_FL_BUF
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
if (fl_small_pg != PAGE_SIZE ||
- (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
- (fl_large_pg & (fl_large_pg-1)) != 0))) {
+ (fl_large_pg & (fl_large_pg-1)) != 0) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
return -EINVAL;
@@ -2699,8 +2778,8 @@ static int t4_sge_init_hard(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control;
- int ret;
+ u32 sge_control, sge_conm_ctrl;
+ int ret, egress_threshold;
/*
* Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2725,15 +2804,24 @@ int t4_sge_init(struct adapter *adap)
* SGE's Egress Congestion Threshold. If it isn't, then we can get
* stuck waiting for new packets while the SGE is waiting for us to
* give it more Free List entries. (Note that the SGE's Egress
- * Congestion Threshold is in units of 2 Free List pointers.)
+ * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+ * there was only a single field to control this. For T5 there's the
+ * original field which now only applies to Unpacked Mode Free List
+ * buffers and a new field which only applies to Packed Mode Free List
+ * buffers.
*/
- s->fl_starve_thres
- = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+ sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+ if (is_t4(adap->params.chip))
+ egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+ else
+ egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+ s->fl_starve_thres = 2*egress_threshold + 1;
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
- s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
- s->idma_state[0] = s->idma_state[1] = 0;
+ s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
+ s->idma_stalled[0] = 0;
+ s->idma_stalled[1] = 0;
spin_lock_init(&s->intrq_lock);
return 0;
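The starvation-threshold arithmetic above is worth making concrete with a worked example (the field value here is hypothetical):

    /*
     * If the relevant SGE_CONM_CTRL threshold field reads back as 32,
     * the SGE's egress congestion threshold is 32 units of 2 Free List
     * pointers each, so
     *
     *     fl_starve_thres = 2 * 32 + 1 = 65
     *
     * Free List entries is the driver's low-water mark. On T4 the field
     * comes from EGRTHRESHOLD_GET(); on T5 it comes from the new
     * EGRTHRESHOLDPACKING_GET() field, since the original field now
     * governs only Unpacked Mode Free List buffers.
     */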
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2c109343d570..fb2fe65903c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
u32 cclk_param, cclk_val;
int i, ret, addr;
- int ec, sn;
+ int ec, sn, pn;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ strim(p->pn);
/*
* Ask firmware for the Core Clock since it knows how to translate the
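One subtlety in the "PN" hunk above: as committed, the added memcpy() reuses i, which still holds the field size computed for the preceding "SN" keyword, so the part number is copied correctly only while both VPD fields report the same length. A more defensive version would recompute the size per keyword; a sketch (not what the hunk commits), using the same pci_vpd_info_field_size() helper already called for "SN":

    	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
    	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
    	strim(p->pn);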
@@ -1155,7 +1158,8 @@ out:
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_ANEG)
/**
* t4_link_start - apply link configuration to MAC/PHY
@@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
}
/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char *const port_type_description[] = {
+ "R XFI",
+ "R XAUI",
+ "T SGMII",
+ "T XFI",
+ "T XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "R SFP+",
+ "KR/KX",
+ "KR/KX/KX4",
+ "R QSFP_10G",
+ "",
+ "R QSFP",
+ "R BP40_BA",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -2563,6 +2597,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
}
/**
+ * t4_sge_decode_idma_state - decode the idma state
+ * @adap: the adapter
+ * @state: the state idma is stuck in
+ */
+void t4_sge_decode_idma_state(struct adapter *adapter, int state)
+{
+ static const char * const t4_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "Not used",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATA_FL_PREP",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ "IDMA_FL_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATAFL_DONE",
+ "IDMA_FL_REQ_HEADERFL_DONE",
+ };
+ static const char * const t5_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_ALMOST_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_DROP_SEND_INC",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ };
+ static const u32 sge_regs[] = {
+ SGE_DEBUG_DATA_LOW_INDEX_2,
+ SGE_DEBUG_DATA_LOW_INDEX_3,
+ SGE_DEBUG_DATA_HIGH_INDEX_10,
+ };
+ const char **sge_idma_decode;
+ int sge_idma_decode_nstates;
+ int i;
+
+ if (is_t4(adapter->params.chip)) {
+ sge_idma_decode = (const char **)t4_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+ } else {
+ sge_idma_decode = (const char **)t5_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+ }
+
+ if (state < sge_idma_decode_nstates)
+ CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
+ else
+ CH_WARN(adapter, "idma state %d unknown\n", state);
+
+ for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
+ CH_WARN(adapter, "SGE register %#x value %#x\n",
+ sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -3533,11 +3673,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (stat & FW_PORT_CMD_TXPAUSE)
fc |= PAUSE_TX;
if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = SPEED_100;
+ speed = 100;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = SPEED_1000;
+ speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = SPEED_10000;
+ speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) { /* something changed */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index cd6874b571ee..f2738c710789 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -116,6 +116,7 @@ enum CPL_error {
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
CPL_ERR_RTX_NEG_ADVICE = 35,
CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_KEEPALV_NEG_ADVICE = 37,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_IWARP_FLM = 50,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 4082522d8140..225ad8a5722d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -230,6 +230,12 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+#define EGRTHRESHOLDPACKING_MASK 0x3fU
+#define EGRTHRESHOLDPACKING_SHIFT 14
+#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
+#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
+ EGRTHRESHOLDPACKING_MASK)
+
#define SGE_DBFIFO_STATUS 0x10a4
#define HP_INT_THRESH_SHIFT 28
#define HP_INT_THRESH_MASK 0xfU
@@ -278,6 +284,9 @@
#define SGE_DEBUG_INDEX 0x10cc
#define SGE_DEBUG_DATA_HIGH 0x10d0
#define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
#define S_HP_INT_THRESH 28
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 74fea74ce0aa..9cc973fbcf26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
/*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
FW_PORT_TYPE_SFP,
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
+ FW_PORT_TYPE_QSFP_10G,
+ FW_PORT_TYPE_QSFP,
+ FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0899c0983594..52859288de7b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
*/
static int enable_msix(struct adapter *adapter)
{
- int i, err, want, need;
+ int i, want, need, nqsets;
struct msix_entry entries[MSIX_ENTRIES];
struct sge *s = &adapter->sge;
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
*/
want = s->max_ethqsets + MSIX_EXTRAS;
need = adapter->params.nports + MSIX_EXTRAS;
- while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
- want = err;
- if (err == 0) {
- int nqsets = want - MSIX_EXTRAS;
- if (nqsets < s->max_ethqsets) {
- dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
- " for %d Queue Sets\n", nqsets);
- s->max_ethqsets = nqsets;
- if (nqsets < s->ethqsets)
- reduce_ethqs(adapter, nqsets);
- }
- for (i = 0; i < want; ++i)
- adapter->msix_info[i].vec = entries[i].vector;
- } else if (err > 0) {
- pci_disable_msix(adapter->pdev);
- dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
- " not using MSI-X\n", err);
+ want = pci_enable_msix_range(adapter->pdev, entries, need, want);
+ if (want < 0)
+ return want;
+
+ nqsets = want - MSIX_EXTRAS;
+ if (nqsets < s->max_ethqsets) {
+ dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+ " for %d Queue Sets\n", nqsets);
+ s->max_ethqsets = nqsets;
+ if (nqsets < s->ethqsets)
+ reduce_ethqs(adapter, nqsets);
}
- return err;
+ for (i = 0; i < want; ++i)
+ adapter->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
static const struct net_device_ops cxgb4vf_netdev_ops = {
@@ -2947,6 +2944,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
CH_DEVICE(0x5811, 0), /* T520-lp-cr */
CH_DEVICE(0x5812, 0), /* T560-cr */
CH_DEVICE(0x5813, 0), /* T580-cr */
+ CH_DEVICE(0x5814, 0), /* T580-so-cr */
+ CH_DEVICE(0x5815, 0), /* T502-bt */
+ CH_DEVICE(0x5880, 0),
+ CH_DEVICE(0x5881, 0),
+ CH_DEVICE(0x5882, 0),
+ CH_DEVICE(0x5883, 0),
+ CH_DEVICE(0x5884, 0),
+ CH_DEVICE(0x5885, 0),
{ 0, }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0a89963c48ce..9cfa4b4bb089 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
if (sdesc->skb) {
if (need_unmap)
unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
- kfree_skb(sdesc->skb);
+ dev_consume_skb_any(sdesc->skb);
sdesc->skb = NULL;
}
@@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* need it any longer.
*/
inline_tx_skb(skb, &txq->q, cpl + 1);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
} else {
/*
* Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1354,7 +1354,7 @@ out_free:
* An error of some sort happened. Free the TX skb and tell the
* OS that we've "dealt" with the packet ...
*/
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}