Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/adapter.h        |  22
-rw-r--r--  drivers/net/cxgb3/common.h         |  17
-rw-r--r--  drivers/net/cxgb3/cxgb3_ctl_defs.h |  52
-rw-r--r--  drivers/net/cxgb3/cxgb3_defs.h     |  20
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c     | 143
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c  |  79
-rw-r--r--  drivers/net/cxgb3/regs.h           |  10
-rw-r--r--  drivers/net/cxgb3/sge.c            | 203
-rw-r--r--  drivers/net/cxgb3/sge_defs.h       |   4
-rw-r--r--  drivers/net/cxgb3/t3_hw.c          |  61
-rw-r--r--  drivers/net/cxgb3/version.h        |   2
-rw-r--r--  drivers/net/cxgb3/xgmac.c          |  33
12 files changed, 336 insertions(+), 310 deletions(-)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 20e887de2545..044261703381 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -49,11 +49,13 @@
typedef irqreturn_t(*intr_handler_t) (int, void *);
struct vlan_group;
-
struct adapter;
+struct sge_qset;
+
struct port_info {
struct adapter *adapter;
struct vlan_group *vlan_grp;
+ struct sge_qset *qs;
const struct port_type_info *port_type;
u8 port_id;
u8 rx_csum_offload;
@@ -173,10 +175,12 @@ enum { /* per port SGE statistics */
};
struct sge_qset { /* an SGE queue set */
+ struct adapter *adap;
+ struct napi_struct napi;
struct sge_rspq rspq;
struct sge_fl fl[SGE_RXQ_PER_SET];
struct sge_txq txq[SGE_TXQ_PER_SET];
- struct net_device *netdev; /* associated net device */
+ struct net_device *netdev;
unsigned long txq_stopped; /* which Tx queues are stopped */
struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
unsigned long port_stats[SGE_PSTAT_MAX];
@@ -221,12 +225,6 @@ struct adapter {
struct delayed_work adap_check_task;
struct work_struct ext_intr_handler_task;
- /*
- * Dummy netdevices are needed when using multiple receive queues with
- * NAPI as each netdevice can service only one queue.
- */
- struct net_device *dummy_netdev[SGE_QSETS - 1];
-
struct dentry *debugfs_root;
struct mutex mdio_lock;
@@ -253,12 +251,6 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
return netdev_priv(adap->port[idx]);
}
-/*
- * We use the spare atalk_ptr to map a net device to its SGE queue set.
- * This is a macro so it can be used as l-value.
- */
-#define dev2qset(netdev) ((netdev)->atalk_ptr)
-
#define OFFLOAD_DEVMAP_BIT 15
#define tdev2adap(d) container_of(d, struct adapter, tdev)
@@ -284,7 +276,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int irq_vec_idx, const struct qset_params *p,
- int ntxq, struct net_device *netdev);
+ int ntxq, struct net_device *dev);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
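[Annotation] Before the common.h hunk, a minimal sketch of the pattern these adapter.h changes adopt: the napi_struct now lives inside each queue set, and the poll handler recovers the queue set with container_of(). Kernel context assumed; my_qset and my_poll are hypothetical names, the NAPI calls are the kernel's own.

struct my_qset {
	struct napi_struct napi;	/* embedded, one instance per queue set */
	/* ... rx/tx queue state ... */
};

static int my_poll(struct napi_struct *napi, int budget)
{
	/* recover the enclosing queue set from the embedded napi_struct */
	struct my_qset *qs = container_of(napi, struct my_qset, napi);
	int work_done = 0;

	/* ... process up to 'budget' responses for qs ... */

	if (work_done < budget)
		napi_complete(napi);	/* all done: re-enable interrupts */
	return work_done;
}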
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 2129210a67c1..99c75d30f67a 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -97,6 +97,7 @@ enum {
MAX_NPORTS = 2, /* max # of ports */
MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
EEPROMSIZE = 8192, /* Serial EEPROM size */
+ SERNUM_LEN = 16, /* Serial # length */
RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
@@ -104,7 +105,7 @@ enum {
PROTO_SRAM_LINES = 128, /* size of TP sram */
};
-#define MAX_RX_COALESCING_LEN 16224U
+#define MAX_RX_COALESCING_LEN 12288U
enum {
PAUSE_RX = 1 << 0,
@@ -126,8 +127,8 @@ enum { /* adapter interrupt-maintained statistics */
enum {
TP_VERSION_MAJOR = 1,
- TP_VERSION_MINOR = 0,
- TP_VERSION_MICRO = 44
+ TP_VERSION_MINOR = 1,
+ TP_VERSION_MICRO = 0
};
#define S_TP_VERSION_MAJOR 16
@@ -167,8 +168,8 @@ enum {
};
struct sg_ent { /* SGE scatter/gather entry */
- u32 len[2];
- u64 addr[2];
+ __be32 len[2];
+ __be64 addr[2];
};
#ifndef SGE_NUM_GENBITS
@@ -391,6 +392,7 @@ struct vpd_params {
unsigned int uclk;
unsigned int mdc;
unsigned int mem_timing;
+ u8 sn[SERNUM_LEN + 1];
u8 eth_base[6];
u8 port_type[MAX_NPORTS];
unsigned short xauicfg[2];
@@ -436,6 +438,7 @@ enum { /* chip revisions */
T3_REV_A = 0,
T3_REV_B = 2,
T3_REV_B2 = 3,
+ T3_REV_C = 4,
};
struct trace_params {
@@ -507,9 +510,11 @@ struct cmac {
unsigned int tx_xcnt;
u64 tx_mcnt;
unsigned int rx_xcnt;
+ unsigned int rx_ocnt;
u64 rx_mcnt;
unsigned int toggle_cnt;
unsigned int txen;
+ u64 rx_pause;
struct mac_stats stats;
};
@@ -687,7 +692,7 @@ int t3_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
int t3_get_fw_version(struct adapter *adapter, u32 *vers);
-int t3_check_fw_version(struct adapter *adapter);
+int t3_check_fw_version(struct adapter *adapter, int *must_load);
int t3_init_hw(struct adapter *adapter, u32 fw_params);
void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
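[Annotation] With sg_ent's fields now typed __be32/__be64, sparse can verify that writers byte-swap explicitly. A hedged one-entry sketch of what that typing demands (fill_sg_ent is hypothetical; sge.c's make_sgl performs the same conversions):

static void fill_sg_ent(struct sg_ent *sgp, dma_addr_t mapping, u32 len)
{
	/* the SGE reads these fields big-endian; convert from CPU order */
	sgp->len[0] = cpu_to_be32(len);
	sgp->addr[0] = cpu_to_be64(mapping);
}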
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
index 2095ddacff78..6c4f32066919 100644
--- a/drivers/net/cxgb3/cxgb3_ctl_defs.h
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -33,27 +33,29 @@
#define _CXGB3_OFFLOAD_CTL_DEFS_H
enum {
- GET_MAX_OUTSTANDING_WR,
- GET_TX_MAX_CHUNK,
- GET_TID_RANGE,
- GET_STID_RANGE,
- GET_RTBL_RANGE,
- GET_L2T_CAPACITY,
- GET_MTUS,
- GET_WR_LEN,
- GET_IFF_FROM_MAC,
- GET_DDP_PARAMS,
- GET_PORTS,
-
- ULP_ISCSI_GET_PARAMS,
- ULP_ISCSI_SET_PARAMS,
-
- RDMA_GET_PARAMS,
- RDMA_CQ_OP,
- RDMA_CQ_SETUP,
- RDMA_CQ_DISABLE,
- RDMA_CTRL_QP_SETUP,
- RDMA_GET_MEM,
+ GET_MAX_OUTSTANDING_WR = 0,
+ GET_TX_MAX_CHUNK = 1,
+ GET_TID_RANGE = 2,
+ GET_STID_RANGE = 3,
+ GET_RTBL_RANGE = 4,
+ GET_L2T_CAPACITY = 5,
+ GET_MTUS = 6,
+ GET_WR_LEN = 7,
+ GET_IFF_FROM_MAC = 8,
+ GET_DDP_PARAMS = 9,
+ GET_PORTS = 10,
+
+ ULP_ISCSI_GET_PARAMS = 11,
+ ULP_ISCSI_SET_PARAMS = 12,
+
+ RDMA_GET_PARAMS = 13,
+ RDMA_CQ_OP = 14,
+ RDMA_CQ_SETUP = 15,
+ RDMA_CQ_DISABLE = 16,
+ RDMA_CTRL_QP_SETUP = 17,
+ RDMA_GET_MEM = 18,
+
+ GET_RX_PAGE_INFO = 50,
};
/*
@@ -161,4 +163,12 @@ struct rdma_ctrlqp_setup {
unsigned long long base_addr;
unsigned int size;
};
+
+/*
+ * Offload TX/RX page information.
+ */
+struct ofld_page_info {
+ unsigned int page_size; /* Page size, should be a power of 2 */
+ unsigned int num; /* Number of pages */
+};
#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
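[Annotation] A sketch of how an offload client might issue the new GET_RX_PAGE_INFO control through the t3cdev ctl hook (query_rx_pages is hypothetical; the request code and struct are the ones added above):

static int query_rx_pages(struct t3cdev *tdev)
{
	struct ofld_page_info rx_info;
	int err;

	err = tdev->ctl(tdev, GET_RX_PAGE_INFO, &rx_info);
	if (err)
		return err;

	printk(KERN_INFO "%s: %u rx pages of %u bytes\n",
	       tdev->name, rx_info.num, rx_info.page_size);
	return 0;
}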
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index 483a594210a7..45e92164c260 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -79,9 +79,17 @@ static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
unsigned int tid)
{
+ union listen_entry *e;
+
if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
return NULL;
- return &(stid2entry(t, tid)->t3c_tid);
+
+ e = stid2entry(t, tid);
+ if ((void *)e->next >= (void *)t->tid_tab &&
+ (void *)e->next < (void *)&t->atid_tab[t->natids])
+ return NULL;
+
+ return &e->t3c_tid;
}
/*
@@ -90,9 +98,17 @@ static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
unsigned int tid)
{
+ union active_open_entry *e;
+
if (tid < t->atid_base || tid >= t->atid_base + t->natids)
return NULL;
- return &(atid2entry(t, tid)->t3c_tid);
+
+ e = atid2entry(t, tid);
+ if ((void *)e->next >= (void *)t->tid_tab &&
+ (void *)e->next < (void *)&t->atid_tab[t->natids])
+ return NULL;
+
+ return &e->t3c_tid;
}
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
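[Annotation] The new range checks in lookup_stid() and lookup_atid() rely on how free entries are chained. For reference, the unions overlay the payload with a free-list link (declarations as in this driver's headers); a 'next' pointer landing inside the tid tables therefore marks the slot as free, and the lookups now return NULL for such slots instead of handing back a stale entry.

union listen_entry {
	struct t3c_tid_entry t3c_tid;	/* payload while the stid is in use */
	union listen_entry *next;	/* free-list link while it is not */
};

union active_open_entry {
	struct t3c_tid_entry t3c_tid;
	union active_open_entry *next;
};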
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 5ab319cfe5de..61ffc925eae7 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -339,49 +339,17 @@ static void setup_rss(struct adapter *adap)
V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
-/*
- * If we have multiple receive queues per port serviced by NAPI we need one
- * netdevice per queue as NAPI operates on netdevices. We already have one
- * netdevice, namely the one associated with the interface, so we use dummy
- * ones for any additional queues. Note that these netdevices exist purely
- * so that NAPI has something to work with, they do not represent network
- * ports and are not registered.
- */
-static int init_dummy_netdevs(struct adapter *adap)
+static void init_napi(struct adapter *adap)
{
- int i, j, dummy_idx = 0;
- struct net_device *nd;
-
- for_each_port(adap, i) {
- struct net_device *dev = adap->port[i];
- const struct port_info *pi = netdev_priv(dev);
-
- for (j = 0; j < pi->nqsets - 1; j++) {
- if (!adap->dummy_netdev[dummy_idx]) {
- struct port_info *p;
-
- nd = alloc_netdev(sizeof(*p), "", ether_setup);
- if (!nd)
- goto free_all;
+ int i;
- p = netdev_priv(nd);
- p->adapter = adap;
- nd->weight = 64;
- set_bit(__LINK_STATE_START, &nd->state);
- adap->dummy_netdev[dummy_idx] = nd;
- }
- strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
- dummy_idx++;
- }
- }
- return 0;
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
-free_all:
- while (--dummy_idx >= 0) {
- free_netdev(adap->dummy_netdev[dummy_idx]);
- adap->dummy_netdev[dummy_idx] = NULL;
+ if (qs->adap)
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
+ 64);
}
- return -ENOMEM;
}
/*
@@ -392,20 +360,18 @@ free_all:
static void quiesce_rx(struct adapter *adap)
{
int i;
- struct net_device *dev;
- for_each_port(adap, i) {
- dev = adap->port[i];
- while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
- msleep(1);
- }
+ for (i = 0; i < SGE_QSETS; i++)
+ if (adap->sge.qs[i].adap)
+ napi_disable(&adap->sge.qs[i].napi);
+}
- for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
- dev = adap->dummy_netdev[i];
- if (dev)
- while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
- msleep(1);
- }
+static void enable_all_napi(struct adapter *adap)
+{
+ int i;
+ for (i = 0; i < SGE_QSETS; i++)
+ if (adap->sge.qs[i].adap)
+ napi_enable(&adap->sge.qs[i].napi);
}
/**
@@ -418,7 +384,7 @@ static void quiesce_rx(struct adapter *adap)
*/
static int setup_sge_qsets(struct adapter *adap)
{
- int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
+ int i, j, err, irq_idx = 0, qset_idx = 0;
unsigned int ntxq = SGE_TXQ_PER_SET;
if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
@@ -426,15 +392,14 @@ static int setup_sge_qsets(struct adapter *adap)
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
- const struct port_info *pi = netdev_priv(dev);
+ struct port_info *pi = netdev_priv(dev);
+ pi->qs = &adap->sge.qs[pi->first_qset];
for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 :
irq_idx,
- &adap->params.sge.qset[qset_idx], ntxq,
- j == 0 ? dev :
- adap-> dummy_netdev[dummy_dev_idx++]);
+ &adap->params.sge.qset[qset_idx], ntxq, dev);
if (err) {
t3_free_sge_resources(adap);
return err;
@@ -768,11 +733,14 @@ static inline char t3rev2char(struct adapter *adapter)
case T3_REV_B2:
rev = 'b';
break;
+ case T3_REV_C:
+ rev = 'c';
+ break;
}
return rev;
}
-int update_tpsram(struct adapter *adap)
+static int update_tpsram(struct adapter *adap)
{
const struct firmware *tpsram;
char buf[64];
@@ -828,15 +796,16 @@ release_tpsram:
*/
static int cxgb_up(struct adapter *adap)
{
- int err = 0;
+ int err;
int must_load;
if (!(adap->flags & FULL_INIT_DONE)) {
- err = t3_check_fw_version(adap);
- if (err == -EINVAL)
+ err = t3_check_fw_version(adap, &must_load);
+ if (err == -EINVAL) {
err = upgrade_fw(adap);
- if (err)
- goto out;
+ if (err && must_load)
+ goto out;
+ }
err = t3_check_tpsram_version(adap, &must_load);
if (err == -EINVAL) {
@@ -845,21 +814,18 @@ static int cxgb_up(struct adapter *adap)
goto out;
}
- err = init_dummy_netdevs(adap);
- if (err)
- goto out;
-
err = t3_init_hw(adap, 0);
if (err)
goto out;
t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
-
+
err = setup_sge_qsets(adap);
if (err)
goto out;
setup_rss(adap);
+ init_napi(adap);
adap->flags |= FULL_INIT_DONE;
}
@@ -886,6 +852,7 @@ static int cxgb_up(struct adapter *adap)
adap->name, adap)))
goto irq_err;
+ enable_all_napi(adap);
t3_sge_start(adap);
t3_intr_enable(adap);
@@ -944,7 +911,7 @@ static int offload_open(struct net_device *dev)
struct adapter *adapter = pi->adapter;
struct t3cdev *tdev = dev2t3cdev(dev);
int adap_up = adapter->open_device_map & PORT_MASK;
- int err = 0;
+ int err;
if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
return 0;
@@ -1012,8 +979,10 @@ static int cxgb_open(struct net_device *dev)
int other_ports = adapter->open_device_map & PORT_MASK;
int err;
- if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+ if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
+ quiesce_rx(adapter);
return err;
+ }
set_bit(pi->port_id, &adapter->open_device_map);
if (is_offload(adapter) && !ofld_disable) {
@@ -1162,9 +1131,14 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
};
-static int get_stats_count(struct net_device *dev)
+static int get_sset_count(struct net_device *dev, int sset)
{
- return ARRAY_SIZE(stats_strings);
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
}
#define T3_REGMAP_SIZE (3 * 1024)
@@ -1601,7 +1575,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct adapter *adapter = pi->adapter;
u32 aligned_offset, aligned_len, *p;
u8 *buf;
- int err = 0;
+ int err;
if (eeprom->magic != EEPROM_MAGIC)
return -EINVAL;
@@ -1665,20 +1639,17 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_pauseparam = set_pauseparam,
.get_rx_csum = get_rx_csum,
.set_rx_csum = set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = get_strings,
.phys_id = cxgb3_phys_id,
.nway_reset = restart_autoneg,
- .get_stats_count = get_stats_count,
+ .get_sset_count = get_sset_count,
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
.get_wol = get_wol,
- .get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
};
@@ -1798,7 +1769,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
}
case CHELSIO_SET_QSET_NUM:{
struct ch_reg edata;
- struct port_info *pi = netdev_priv(dev);
unsigned int i, first_qset = 0, other_qsets = 0;
if (!capable(CAP_NET_ADMIN))
@@ -1830,7 +1800,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
}
case CHELSIO_GET_QSET_NUM:{
struct ch_reg edata;
- struct port_info *pi = netdev_priv(dev);
edata.cmd = CHELSIO_GET_QSET_NUM;
edata.val = pi->nqsets;
@@ -2332,6 +2301,10 @@ void t3_fatal_err(struct adapter *adapter)
if (adapter->flags & FULL_INIT_DONE) {
t3_sge_stop(adapter);
+ t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
+ t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
+ t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
+ t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
t3_intr_disable(adapter);
}
CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
@@ -2391,10 +2364,12 @@ static void __devinit print_port_info(struct adapter *adap,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name && adap->params.vpd.mclk)
- printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
+ printk(KERN_INFO
+ "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
adap->name, t3_mc7_size(&adap->cm) >> 20,
t3_mc7_size(&adap->pmtx) >> 20,
- t3_mc7_size(&adap->pmrx) >> 20);
+ t3_mc7_size(&adap->pmrx) >> 20,
+ adap->params.vpd.sn);
}
}
@@ -2490,7 +2465,6 @@ static int __devinit init_one(struct pci_dev *pdev,
goto out_free_dev;
}
- SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter->port[i] = netdev;
@@ -2524,7 +2498,6 @@ static int __devinit init_one(struct pci_dev *pdev,
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = cxgb_netpoll;
#endif
- netdev->weight = 64;
SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
}
@@ -2625,12 +2598,6 @@ static void __devexit remove_one(struct pci_dev *pdev)
t3_free_sge_resources(adapter);
cxgb_disable_msi(adapter);
- for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
- if (adapter->dummy_netdev[i]) {
- free_netdev(adapter->dummy_netdev[i]);
- adapter->dummy_netdev[i] = NULL;
- }
-
for_each_port(adapter, i)
if (adapter->port[i])
free_netdev(adapter->port[i]);
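[Annotation] Summing up the cxgb3_main.c conversion, the enable/disable discipline that init_napi(), enable_all_napi() and quiesce_rx() now follow, as a hedged sketch (napi_lifecycle_sketch is illustrative):

static void napi_lifecycle_sketch(struct net_device *dev, struct sge_qset *qs,
				  int (*poll_fn)(struct napi_struct *, int))
{
	netif_napi_add(dev, &qs->napi, poll_fn, 64);	/* register, weight 64 */
	napi_enable(&qs->napi);		/* must precede any napi_schedule() */
	/* ... interrupt handlers call napi_schedule(&qs->napi) ... */
	napi_disable(&qs->napi);	/* blocks until in-flight polls finish */
}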
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bdff7baeb59d..bd25421bc12a 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -57,7 +57,7 @@ static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);
static const unsigned int MAX_ATIDS = 64 * 1024;
-static const unsigned int ATID_BASE = 0x100000;
+static const unsigned int ATID_BASE = 0x10000;
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -222,32 +222,32 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
int ret = 0;
switch (req) {
- case RDMA_GET_PARAMS:{
- struct rdma_info *req = data;
+ case RDMA_GET_PARAMS: {
+ struct rdma_info *rdma = data;
struct pci_dev *pdev = adapter->pdev;
- req->udbell_physbase = pci_resource_start(pdev, 2);
- req->udbell_len = pci_resource_len(pdev, 2);
- req->tpt_base =
+ rdma->udbell_physbase = pci_resource_start(pdev, 2);
+ rdma->udbell_len = pci_resource_len(pdev, 2);
+ rdma->tpt_base =
t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
- req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
- req->pbl_base =
+ rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+ rdma->pbl_base =
t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
- req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
- req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
- req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
- req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
- req->pdev = pdev;
+ rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+ rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+ rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+ rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+ rdma->pdev = pdev;
break;
}
case RDMA_CQ_OP:{
unsigned long flags;
- struct rdma_cq_op *req = data;
+ struct rdma_cq_op *rdma = data;
/* may be called in any context */
spin_lock_irqsave(&adapter->sge.reg_lock, flags);
- ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
- req->credits);
+ ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
+ rdma->credits);
spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
break;
}
@@ -274,15 +274,15 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
break;
}
case RDMA_CQ_SETUP:{
- struct rdma_cq_setup *req = data;
+ struct rdma_cq_setup *rdma = data;
spin_lock_irq(&adapter->sge.reg_lock);
ret =
- t3_sge_init_cqcntxt(adapter, req->id,
- req->base_addr, req->size,
+ t3_sge_init_cqcntxt(adapter, rdma->id,
+ rdma->base_addr, rdma->size,
ASYNC_NOTIF_RSPQ,
- req->ovfl_mode, req->credits,
- req->credit_thres);
+ rdma->ovfl_mode, rdma->credits,
+ rdma->credit_thres);
spin_unlock_irq(&adapter->sge.reg_lock);
break;
}
@@ -292,13 +292,13 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
spin_unlock_irq(&adapter->sge.reg_lock);
break;
case RDMA_CTRL_QP_SETUP:{
- struct rdma_ctrlqp_setup *req = data;
+ struct rdma_ctrlqp_setup *rdma = data;
spin_lock_irq(&adapter->sge.reg_lock);
ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
SGE_CNTXT_RDMA,
ASYNC_NOTIF_RSPQ,
- req->base_addr, req->size,
+ rdma->base_addr, rdma->size,
FW_RI_TID_START, 1, 0);
spin_unlock_irq(&adapter->sge.reg_lock);
break;
@@ -317,6 +317,8 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
struct iff_mac *iffmacp;
struct ddp_params *ddpp;
struct adap_ports *ports;
+ struct ofld_page_info *rx_page_info;
+ struct tp_params *tp = &adapter->params.tp;
int i;
switch (req) {
@@ -382,6 +384,11 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
if (!offload_running(adapter))
return -EAGAIN;
return cxgb_rdma_ctl(adapter, req, data);
+ case GET_RX_PAGE_INFO:
+ rx_page_info = data;
+ rx_page_info->page_size = tp->rx_pg_size;
+ rx_page_info->num = tp->rx_num_pgs;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -687,10 +694,19 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
- t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
- if (t3c_tid->ctx && t3c_tid->client->handlers &&
+ if (unlikely(tid >= t->ntids)) {
+ printk("%s: passive open TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_stid(t, stid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
(dev, skb, t3c_tid->ctx);
@@ -772,16 +788,25 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_act_establish *req = cplhdr(skb);
unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
- t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
+ if (unlikely(tid >= t->ntids)) {
+ printk("%s: active establish TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_atid(t, atid);
if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
} else {
printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
- dev->name, CPL_PASS_ACCEPT_REQ);
+ dev->name, CPL_ACT_ESTABLISH);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
}
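[Annotation] The req-to-rdma renames in cxgb_rdma_ctl() remove a shadowing hazard; the old shape, reduced to a sketch (ctl_sketch is hypothetical), shows how each case's local 'req' hid the request-code parameter of the same name:

static int ctl_sketch(struct adapter *adapter, unsigned int req, void *data)
{
	switch (req) {
	case RDMA_CQ_OP: {
		struct rdma_cq_op *req = data;	/* shadows the parameter */

		/* from here on, 'req' silently means the struct, not the
		 * request code that selected this case */
		return req->op;
	}
	}
	return -EOPNOTSUPP;
}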
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index aa80313c922e..5e1bc0dec5f1 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -172,6 +172,14 @@
#define A_SG_INT_CAUSE 0x5c
+#define S_HIPIODRBDROPERR 11
+#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR)
+#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U)
+
+#define S_LOPIODRBDROPERR 10
+#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
+#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+
#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)
@@ -1318,6 +1326,7 @@
#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
#define A_PM1_RX_CFG 0x5c0
+#define A_PM1_RX_MODE 0x5c4
#define A_PM1_RX_INT_ENABLE 0x5d8
@@ -1386,6 +1395,7 @@
#define A_PM1_RX_INT_CAUSE 0x5dc
#define A_PM1_TX_CFG 0x5e0
+#define A_PM1_TX_MODE 0x5e4
#define A_PM1_TX_INT_ENABLE 0x5f8
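[Annotation] regs.h follows the Chelsio S_/V_/F_ naming scheme throughout: S_X is a field's bit offset, V_X(v) positions a value in it, and F_X is the one-bit mask. Worked out for the new doorbell-drop bits (doorbell_dropped is a hypothetical helper):

static int doorbell_dropped(u32 status)
{
	/* F_HIPIODRBDROPERR == V_HIPIODRBDROPERR(1U) == 1 << 11 == 0x800
	 * F_LOPIODRBDROPERR == V_LOPIODRBDROPERR(1U) == 1 << 10 == 0x400 */
	return (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) != 0;
}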
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 58a5f60521ed..994b5d6404df 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -79,7 +79,7 @@ enum {
};
struct tx_desc {
- u64 flit[TX_DESC_FLITS];
+ __be64 flit[TX_DESC_FLITS];
};
struct rx_desc {
@@ -544,7 +544,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
* as HW contexts, packet buffers, and descriptor rings. Traffic to the
* queue set must be quiesced prior to calling this.
*/
-void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
int i;
struct pci_dev *pdev = adapter->pdev;
@@ -591,9 +591,6 @@ void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
q->rspq.desc, q->rspq.phys_addr);
}
- if (q->netdev)
- q->netdev->atalk_ptr = NULL;
-
memset(q, 0, sizeof(*q));
}
@@ -907,8 +904,8 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
const struct sge_txq *q,
const struct sg_ent *sgl,
unsigned int flits, unsigned int sgl_flits,
- unsigned int gen, unsigned int wr_hi,
- unsigned int wr_lo)
+ unsigned int gen, __be32 wr_hi,
+ __be32 wr_lo)
{
struct work_request_hdr *wrp = (struct work_request_hdr *)d;
struct tx_sw_desc *sd = &q->sdesc[pidx];
@@ -1074,7 +1071,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int ndesc, pidx, credits, gen, compl;
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- struct sge_qset *qs = dev2qset(dev);
+ struct sge_qset *qs = pi->qs;
struct sge_txq *q = &qs->txq[TXQ_ETH];
/*
@@ -1182,8 +1179,8 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*
* Writes a packet as immediate data into a Tx descriptor. The packet
* contains a work request at its beginning. We must write the packet
- * carefully so the SGE doesn't read accidentally before it's written in
- * its entirety.
+ * carefully so the SGE doesn't read it accidentally before it's written
+ * in its entirety.
*/
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
unsigned int len, unsigned int gen)
@@ -1191,7 +1188,11 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
struct work_request_hdr *to = (struct work_request_hdr *)d;
- memcpy(&to[1], &from[1], len - sizeof(*from));
+ if (likely(!skb->data_len))
+ memcpy(&to[1], &from[1], len - sizeof(*from));
+ else
+ skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
+
to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
V_WR_BCNTLFLT(len & 7));
wmb();
@@ -1261,7 +1262,7 @@ static inline void reclaim_completed_tx_imm(struct sge_txq *q)
static inline int immediate(const struct sk_buff *skb)
{
- return skb->len <= WR_LEN && !skb->data_len;
+ return skb->len <= WR_LEN;
}
/**
@@ -1326,13 +1327,12 @@ static void restart_ctrlq(unsigned long data)
struct sk_buff *skb;
struct sge_qset *qs = (struct sge_qset *)data;
struct sge_txq *q = &qs->txq[TXQ_CTRL];
- const struct port_info *pi = netdev_priv(qs->netdev);
- struct adapter *adap = pi->adapter;
spin_lock(&q->lock);
again:reclaim_completed_tx_imm(q);
- while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
+ while (q->in_use < q->size &&
+ (skb = __skb_dequeue(&q->sendq)) != NULL) {
write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
@@ -1354,7 +1354,7 @@ static void restart_ctrlq(unsigned long data)
}
spin_unlock(&q->lock);
- t3_write_reg(adap, A_SG_KDOORBELL,
+ t3_write_reg(qs->adap, A_SG_KDOORBELL,
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
@@ -1468,12 +1468,13 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
*/
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
- unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
+ unsigned int flits, cnt;
- if (skb->len <= WR_LEN && cnt == 0)
+ if (skb->len <= WR_LEN)
return 1; /* packet fits as immediate data */
flits = skb_transport_offset(skb) / 8; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
if (skb->tail != skb->transport_header)
cnt++;
return flits_to_desc(flits + sgl_len(cnt));
@@ -1638,8 +1639,7 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
else {
struct sge_qset *qs = rspq_to_qset(q);
- if (__netif_rx_schedule_prep(qs->netdev))
- __netif_rx_schedule(qs->netdev);
+ napi_schedule(&qs->napi);
q->rx_head = skb;
}
q->rx_tail = skb;
@@ -1675,34 +1675,30 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
* receive handler. Batches need to be of modest size as we do prefetches
* on the packets in each.
*/
-static int ofld_poll(struct net_device *dev, int *budget)
+static int ofld_poll(struct napi_struct *napi, int budget)
{
- const struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
- struct sge_qset *qs = dev2qset(dev);
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
struct sge_rspq *q = &qs->rspq;
- int work_done, limit = min(*budget, dev->quota), avail = limit;
+ struct adapter *adapter = qs->adap;
+ int work_done = 0;
- while (avail) {
+ while (work_done < budget) {
struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
int ngathered;
spin_lock_irq(&q->lock);
head = q->rx_head;
if (!head) {
- work_done = limit - avail;
- *budget -= work_done;
- dev->quota -= work_done;
- __netif_rx_complete(dev);
+ napi_complete(napi);
spin_unlock_irq(&q->lock);
- return 0;
+ return work_done;
}
tail = q->rx_tail;
q->rx_head = q->rx_tail = NULL;
spin_unlock_irq(&q->lock);
- for (ngathered = 0; avail && head; avail--) {
+ for (ngathered = 0; work_done < budget && head; work_done++) {
prefetch(head->data);
skbs[ngathered] = head;
head = head->next;
@@ -1724,10 +1720,8 @@ static int ofld_poll(struct net_device *dev, int *budget)
}
deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
}
- work_done = limit - avail;
- *budget -= work_done;
- dev->quota -= work_done;
- return 1;
+
+ return work_done;
}
/**
@@ -2071,50 +2065,47 @@ static inline int is_pure_response(const struct rsp_desc *r)
/**
* napi_rx_handler - the NAPI handler for Rx processing
- * @dev: the net device
+ * @napi: the napi instance
* @budget: how many packets we can process in this round
*
* Handler for new data events when using NAPI.
*/
-static int napi_rx_handler(struct net_device *dev, int *budget)
+static int napi_rx_handler(struct napi_struct *napi, int budget)
{
- const struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- struct sge_qset *qs = dev2qset(dev);
- int effective_budget = min(*budget, dev->quota);
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct adapter *adap = qs->adap;
+ int work_done = process_responses(adap, qs, budget);
- int work_done = process_responses(adap, qs, effective_budget);
- *budget -= work_done;
- dev->quota -= work_done;
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
- if (work_done >= effective_budget)
- return 1;
-
- netif_rx_complete(dev);
-
- /*
- * Because we don't atomically flush the following write it is
- * possible that in very rare cases it can reach the device in a way
- * that races with a new response being written plus an error interrupt
- * causing the NAPI interrupt handler below to return unhandled status
- * to the OS. To protect against this would require flushing the write
- * and doing both the write and the flush with interrupts off. Way too
- * expensive and unjustifiable given the rarity of the race.
- *
- * The race cannot happen at all with MSI-X.
- */
- t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
- V_NEWTIMER(qs->rspq.next_holdoff) |
- V_NEWINDEX(qs->rspq.cidx));
- return 0;
+ /*
+ * Because we don't atomically flush the following
+ * write it is possible that in very rare cases it can
+ * reach the device in a way that races with a new
+ * response being written plus an error interrupt
+ * causing the NAPI interrupt handler below to return
+ * unhandled status to the OS. To protect against
+ * this would require flushing the write and doing
+ * both the write and the flush with interrupts off.
+ * Way too expensive and unjustifiable given the
+ * rarity of the race.
+ *
+ * The race cannot happen at all with MSI-X.
+ */
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+ V_NEWTIMER(qs->rspq.next_holdoff) |
+ V_NEWINDEX(qs->rspq.cidx));
+ }
+ return work_done;
}
/*
* Returns true if the device is already scheduled for polling.
*/
-static inline int napi_is_scheduled(struct net_device *dev)
+static inline int napi_is_scheduled(struct napi_struct *napi)
{
- return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+ return test_bit(NAPI_STATE_SCHED, &napi->state);
}
/**
@@ -2197,8 +2188,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
return 0;
}
- if (likely(__netif_rx_schedule_prep(qs->netdev)))
- __netif_rx_schedule(qs->netdev);
+ napi_schedule(&qs->napi);
return 1;
}
@@ -2209,8 +2199,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
{
struct sge_qset *qs = cookie;
- const struct port_info *pi = netdev_priv(qs->netdev);
- struct adapter *adap = pi->adapter;
+ struct adapter *adap = qs->adap;
struct sge_rspq *q = &qs->rspq;
spin_lock(&q->lock);
@@ -2226,16 +2215,14 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
* The MSI-X interrupt handler for an SGE response queue for the NAPI case
* (i.e., response queue serviced by NAPI polling).
*/
-irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
struct sge_qset *qs = cookie;
- const struct port_info *pi = netdev_priv(qs->netdev);
- struct adapter *adap = pi->adapter;
struct sge_rspq *q = &qs->rspq;
spin_lock(&q->lock);
- if (handle_responses(adap, q) < 0)
+ if (handle_responses(qs->adap, q) < 0)
q->unhandled_irqs++;
spin_unlock(&q->lock);
return IRQ_HANDLED;
@@ -2278,11 +2265,13 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie)
return IRQ_HANDLED;
}
-static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
+static int rspq_check_napi(struct sge_qset *qs)
{
- if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
- if (likely(__netif_rx_schedule_prep(dev)))
- __netif_rx_schedule(dev);
+ struct sge_rspq *q = &qs->rspq;
+
+ if (!napi_is_scheduled(&qs->napi) &&
+ is_new_response(&q->desc[q->cidx], q)) {
+ napi_schedule(&qs->napi);
return 1;
}
return 0;
@@ -2295,7 +2284,7 @@ static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
* one SGE response queue per port in this mode and protect all response
* queues with queue 0's lock.
*/
-irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
int new_packets;
struct adapter *adap = cookie;
@@ -2303,10 +2292,9 @@ irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
spin_lock(&q->lock);
- new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
+ new_packets = rspq_check_napi(&adap->sge.qs[0]);
if (adap->params.nports == 2)
- new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
- &adap->sge.qs[1].rspq);
+ new_packets += rspq_check_napi(&adap->sge.qs[1]);
if (!new_packets && t3_slow_intr_handler(adap) == 0)
q->unhandled_irqs++;
@@ -2409,9 +2397,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie)
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
u32 map;
- struct net_device *dev;
struct adapter *adap = cookie;
- struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+ struct sge_qset *qs0 = &adap->sge.qs[0];
+ struct sge_rspq *q0 = &qs0->rspq;
t3_write_reg(adap, A_PL_CLI, 0);
map = t3_read_reg(adap, A_SG_DATA_INTR);
@@ -2424,18 +2412,11 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie)
if (unlikely(map & F_ERRINTR))
t3_slow_intr_handler(adap);
- if (likely(map & 1)) {
- dev = adap->sge.qs[0].netdev;
-
- if (likely(__netif_rx_schedule_prep(dev)))
- __netif_rx_schedule(dev);
- }
- if (map & 2) {
- dev = adap->sge.qs[1].netdev;
+ if (likely(map & 1))
+ napi_schedule(&qs0->napi);
- if (likely(__netif_rx_schedule_prep(dev)))
- __netif_rx_schedule(dev);
- }
+ if (map & 2)
+ napi_schedule(&adap->sge.qs[1].napi);
spin_unlock(&q0->lock);
return IRQ_HANDLED;
@@ -2482,6 +2463,10 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
"(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
}
+ if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
+ CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
+ status & F_HIPIODRBDROPERR ? "high" : "lo");
+
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
t3_fatal_err(adapter);
@@ -2514,8 +2499,7 @@ static void sge_timer_cb(unsigned long data)
{
spinlock_t *lock;
struct sge_qset *qs = (struct sge_qset *)data;
- const struct port_info *pi = netdev_priv(qs->netdev);
- struct adapter *adap = pi->adapter;
+ struct adapter *adap = qs->adap;
if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
@@ -2526,9 +2510,9 @@ static void sge_timer_cb(unsigned long data)
spin_unlock(&qs->txq[TXQ_OFLD].lock);
}
lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
- &adap->sge.qs[0].rspq.lock;
+ &adap->sge.qs[0].rspq.lock;
if (spin_trylock_irq(lock)) {
- if (!napi_is_scheduled(qs->netdev)) {
+ if (!napi_is_scheduled(&qs->napi)) {
u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
if (qs->fl[0].credits < qs->fl[0].size)
@@ -2562,12 +2546,9 @@ static void sge_timer_cb(unsigned long data)
*/
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
- if (!qs->netdev)
- return;
-
qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
qs->rspq.polling = p->polling;
- qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
+ qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
}
/**
@@ -2587,7 +2568,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
*/
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int irq_vec_idx, const struct qset_params *p,
- int ntxq, struct net_device *netdev)
+ int ntxq, struct net_device *dev)
{
int i, ret = -ENOMEM;
struct sge_qset *q = &adapter->sge.qs[id];
@@ -2708,16 +2689,10 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
}
spin_unlock(&adapter->sge.reg_lock);
- q->netdev = netdev;
- t3_update_qset_coalesce(q, p);
- /*
- * We use atalk_ptr as a backpointer to a qset. In case a device is
- * associated with multiple queue sets only the first one sets
- * atalk_ptr.
- */
- if (netdev->atalk_ptr == NULL)
- netdev->atalk_ptr = q;
+ q->adap = adapter;
+ q->netdev = dev;
+ t3_update_qset_coalesce(q, p);
refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h
index 514869e26a76..29b6c800b238 100644
--- a/drivers/net/cxgb3/sge_defs.h
+++ b/drivers/net/cxgb3/sge_defs.h
@@ -106,6 +106,10 @@
#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
#define F_CQ_GEN V_CQ_GEN(1U)
+#define S_CQ_ERR 30
+#define V_CQ_ERR(x) ((x) << S_CQ_ERR)
+#define F_CQ_ERR V_CQ_ERR(1U)
+
#define S_CQ_OVERFLOW_MODE 31
#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index b02d15daf5d9..d4ee00d32219 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -119,9 +119,9 @@ void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, u32 *vals, unsigned int nregs,
- unsigned int start_idx)
+static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t3_write_reg(adap, addr_reg, start_idx);
@@ -505,7 +505,7 @@ struct t3_vpd {
u8 vpdr_len[2];
VPD_ENTRY(pn, 16); /* part number */
VPD_ENTRY(ec, 16); /* EC level */
- VPD_ENTRY(sn, 16); /* serial number */
+ VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
VPD_ENTRY(na, 12); /* MAC address base */
VPD_ENTRY(cclk, 6); /* core clock */
VPD_ENTRY(mclk, 6); /* mem clock */
@@ -648,6 +648,7 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
+ memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
/* Old eeproms didn't have port information */
if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
@@ -959,16 +960,18 @@ int t3_get_fw_version(struct adapter *adapter, u32 *vers)
/**
* t3_check_fw_version - check if the FW is compatible with this driver
* @adapter: the adapter
- *
+ * @must_load: set to 1 if loading a new FW image is required
+ *
* Checks if an adapter's FW is compatible with the driver. Returns 0
* if the versions are compatible, a negative error otherwise.
*/
-int t3_check_fw_version(struct adapter *adapter)
+int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
int ret;
u32 vers;
unsigned int type, major, minor;
+ *must_load = 1;
ret = t3_get_fw_version(adapter, &vers);
if (ret)
return ret;
@@ -981,9 +984,17 @@ int t3_check_fw_version(struct adapter *adapter)
minor == FW_VERSION_MINOR)
return 0;
- CH_ERR(adapter, "found wrong FW version(%u.%u), "
- "driver needs version %u.%u\n", major, minor,
- FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ if (major != FW_VERSION_MAJOR)
+ CH_ERR(adapter, "found wrong FW version(%u.%u), "
+ "driver needs version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ else {
+ *must_load = 0;
+ CH_WARN(adapter, "found wrong FW minor version(%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ }
+
return -EINVAL;
}
@@ -1347,6 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter)
{0}
};
+ if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
+ CH_ALERT(adapter, "PEX error code 0x%x\n",
+ t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
pcie_intr_info, adapter->irq_stats))
t3_fatal_err(adapter);
@@ -1798,6 +1813,8 @@ void t3_intr_clear(struct adapter *adapter)
for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
+ if (is_pcie(adapter))
+ t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
}
@@ -1853,6 +1870,8 @@ void t3_port_intr_clear(struct adapter *adapter, int idx)
phy->ops->intr_clear(phy);
}
+#define SG_CONTEXT_CMD_ATTEMPTS 100
+
/**
* t3_sge_write_context - write an SGE context
* @adapter: the adapter
@@ -1872,7 +1891,7 @@ static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
- 0, 5, 1);
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
@@ -2029,7 +2048,8 @@ int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
base_addr >>= 32;
t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
- V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
+ V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
+ V_CQ_ERR(ovfl_mode));
t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
V_CQ_CREDIT_THRES(credit_thres));
return t3_sge_write_context(adapter, id, F_CQ);
@@ -2057,7 +2077,7 @@ int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
- 0, 5, 1);
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
@@ -2081,7 +2101,7 @@ int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
- 0, 5, 1);
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
@@ -2105,7 +2125,7 @@ int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
- 0, 5, 1);
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
@@ -2129,7 +2149,7 @@ int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
- 0, 5, 1);
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
@@ -2154,7 +2174,7 @@ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
V_CONTEXT(id) | F_CQ);
if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
- 0, 5, 1, &val))
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
return -EIO;
if (op >= 2 && op < 7) {
@@ -2164,7 +2184,8 @@ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
- F_CONTEXT_CMD_BUSY, 0, 5, 1))
+ F_CONTEXT_CMD_BUSY, 0,
+ SG_CONTEXT_CMD_ATTEMPTS, 1))
return -EIO;
return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
}
@@ -2190,7 +2211,7 @@ static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
- 5, 1))
+ SG_CONTEXT_CMD_ATTEMPTS, 1))
return -EIO;
data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
@@ -3222,6 +3243,8 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
+ t3_write_reg(adapter, A_PM1_RX_MODE, 0);
+ t3_write_reg(adapter, A_PM1_TX_MODE, 0);
init_hw_for_avail_ports(adapter, adapter->params.nports);
t3_sge_init(adapter, &adapter->params.sge);
@@ -3384,7 +3407,7 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
* Older PCIe cards lose their config space during reset, PCI-X
* ones don't.
*/
-int t3_reset_adapter(struct adapter *adapter)
+static int t3_reset_adapter(struct adapter *adapter)
{
int i, save_and_restore_pcie =
adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index eb508bf8022a..ef1c6339c806 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
/* Firmware version */
#define FW_VERSION_MAJOR 4
-#define FW_VERSION_MINOR 3
+#define FW_VERSION_MINOR 6
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c302b1a30cba..eeb766aeced9 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -142,7 +142,7 @@ int t3_mac_reset(struct cmac *mac)
return 0;
}
-int t3b2_mac_reset(struct cmac *mac)
+static int t3b2_mac_reset(struct cmac *mac)
{
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
@@ -437,12 +437,13 @@ int t3_mac_enable(struct cmac *mac, int which)
struct mac_stats *s = &mac->stats;
if (which & MAC_DIRECTION_TX) {
- t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+ t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
mac->tx_mcnt = s->tx_frames;
mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
@@ -451,9 +452,11 @@ int t3_mac_enable(struct cmac *mac, int which)
A_XGM_TX_SPI4_SOP_EOP_CNT +
oft)));
mac->rx_mcnt = s->rx_frames;
+ mac->rx_pause = s->rx_pause;
mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_RX_SPI4_SOP_EOP_CNT +
oft)));
+ mac->rx_ocnt = s->rx_fifo_ovfl;
mac->txen = F_TXEN;
mac->toggle_cnt = 0;
}
@@ -464,24 +467,19 @@ int t3_mac_enable(struct cmac *mac, int which)
int t3_mac_disable(struct cmac *mac, int which)
{
- int idx = macidx(mac);
struct adapter *adap = mac->adapter;
- int val;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
- t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
- t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
- t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
- t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
mac->txen = 0;
}
if (which & MAC_DIRECTION_RX) {
+ int val = F_MAC_RESET_;
+
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_, 0);
msleep(100);
t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
- val = F_MAC_RESET_;
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
@@ -507,7 +505,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
tx_xcnt = 1; /* By default tx_xcnt is making progress */
tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
rx_xcnt = 1; /* By default rx_xcnt is making progress */
- if (tx_mcnt == mac->tx_mcnt) {
+ if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_TX_SPI4_SOP_EOP_CNT +
mac->offset)));
@@ -524,10 +522,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
goto rxcheck;
}
- if (((tx_tcnt != mac->tx_tcnt) &&
- (tx_xcnt == 0) && (mac->tx_xcnt == 0)) ||
- ((mac->tx_mcnt == tx_mcnt) &&
- (tx_xcnt != 0) && (mac->tx_xcnt != 0))) {
+ if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
if (mac->toggle_cnt > 4) {
status = 2;
goto out;
@@ -541,11 +536,14 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
}
rxcheck:
- if (rx_mcnt != mac->rx_mcnt)
+ if (rx_mcnt != mac->rx_mcnt) {
rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
A_XGM_RX_SPI4_SOP_EOP_CNT +
- mac->offset)));
- else
+ mac->offset))) +
+ (s->rx_fifo_ovfl -
+ mac->rx_ocnt);
+ mac->rx_ocnt = s->rx_fifo_ovfl;
+ } else
goto out;
if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 &&
@@ -560,6 +558,7 @@ out:
mac->tx_mcnt = s->tx_frames;
mac->rx_xcnt = rx_xcnt;
mac->rx_mcnt = s->rx_frames;
+ mac->rx_pause = s->rx_pause;
if (status == 1) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
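[Annotation] The watchdog hunks above extend the stall heuristic with the newly tracked rx_pause and rx_ocnt counters, so pause-frame storms and RX FIFO overflows no longer masquerade as a hung MAC. The core test, reduced to a sketch (mac_looks_stuck is hypothetical):

static int mac_looks_stuck(u64 prev_frames, u64 cur_frames,
			   unsigned int sop_eop_cnt)
{
	/* no frames delivered since the last check, and the SPI4 SOP/EOP
	 * counter shows no progress either: suspect a stalled MAC */
	return cur_frames == prev_frames && sop_eop_cnt == 0;
}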