Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c         |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c        |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h            |  37
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c    |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c    | 219
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c       |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c     |   4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c       | 263
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c  | 450
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c     |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c        |  30
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c              |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c            |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c              | 334
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/smt.c              |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/srq.c              |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c            | 112
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h            |   1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h    |   1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_values.h        |   6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h         |  32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h     |  12
22 files changed, 1068 insertions, 454 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 5701272aa7f7..ce28820c57c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -289,8 +289,7 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
return NULL;
- ctbl = kvzalloc(sizeof(*ctbl) +
- clipt_size*sizeof(struct list_head), GFP_KERNEL);
+ ctbl = kvzalloc(struct_size(ctbl, hash_list, clipt_size), GFP_KERNEL);
if (!ctbl)
return NULL;
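
For reference, struct_size() (from <linux/overflow.h>) replaces the open-coded sizeof arithmetic above: it computes the size of a structure plus a trailing flexible array, saturating on overflow instead of wrapping. A minimal standalone sketch, using a hypothetical structure rather than the driver's clip_tbl:

	#include <linux/list.h>
	#include <linux/mm.h>
	#include <linux/overflow.h>

	/* Hypothetical table with a trailing flexible array of hash buckets. */
	struct example_tbl {
		unsigned int nbuckets;
		struct list_head hash_list[];
	};

	static struct example_tbl *example_tbl_alloc(unsigned int nbuckets)
	{
		struct example_tbl *t;

		/* struct_size(t, hash_list, nbuckets) evaluates to
		 * sizeof(*t) + nbuckets * sizeof(t->hash_list[0]),
		 * with overflow checking.
		 */
		t = kvzalloc(struct_size(t, hash_list, nbuckets), GFP_KERNEL);
		if (!t)
			return NULL;
		t->nbuckets = nbuckets;
		return t;
	}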
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 127b1f624413..7c5bfc931128 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -81,7 +81,7 @@ static int is_fw_attached(struct cudbg_init *pdbg_init)
{
struct adapter *padap = pdbg_init->adap;
- if (!(padap->flags & FW_OK) || padap->use_bd)
+ if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
return 0;
return 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2d1ca920601e..956219c178e1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -568,7 +568,7 @@ struct sge_rspq;
struct port_info {
struct adapter *adapter;
u16 viid;
- s16 xact_addr_filt; /* index of exact MAC address filter */
+ int xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
s8 mdio_addr;
enum fw_port_type port_type;
@@ -606,17 +606,18 @@ struct dentry;
struct work_struct;
enum { /* adapter flags */
- FULL_INIT_DONE = (1 << 0),
- DEV_ENABLED = (1 << 1),
- USING_MSI = (1 << 2),
- USING_MSIX = (1 << 3),
- FW_OK = (1 << 4),
- RSS_TNLALLLOOKUP = (1 << 5),
- USING_SOFT_PARAMS = (1 << 6),
- MASTER_PF = (1 << 7),
- FW_OFLD_CONN = (1 << 9),
- ROOT_NO_RELAXED_ORDERING = (1 << 10),
- SHUTTING_DOWN = (1 << 11),
+ CXGB4_FULL_INIT_DONE = (1 << 0),
+ CXGB4_DEV_ENABLED = (1 << 1),
+ CXGB4_USING_MSI = (1 << 2),
+ CXGB4_USING_MSIX = (1 << 3),
+ CXGB4_FW_OK = (1 << 4),
+ CXGB4_RSS_TNLALLLOOKUP = (1 << 5),
+ CXGB4_USING_SOFT_PARAMS = (1 << 6),
+ CXGB4_MASTER_PF = (1 << 7),
+ CXGB4_FW_OFLD_CONN = (1 << 9),
+ CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10),
+ CXGB4_SHUTTING_DOWN = (1 << 11),
+ CXGB4_SGE_DBQ_TIMER = (1 << 12),
};
enum {
@@ -756,6 +757,8 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
#ifdef CONFIG_CHELSIO_T4_DCB
u8 dcb_prio; /* DCB Priority bound to queue */
#endif
+ u8 dbqt; /* SGE Doorbell Queue Timer in use */
+ unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */
unsigned long tso; /* # of TSO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
@@ -816,6 +819,8 @@ struct sge {
u16 nqs_per_uld; /* # of Rx queues per ULD */
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
+ u16 dbqtimer_tick;
+ u16 dbqtimer_val[SGE_NDBQTIMERS];
u32 fl_pg_order; /* large page allocation size */
u32 stat_len; /* length of status page at ring end */
u32 pktshift; /* padding between CPL & packet data */
@@ -860,6 +865,7 @@ struct doorbell_stats {
struct hash_mac_addr {
struct list_head list;
u8 addr[ETH_ALEN];
+ unsigned int iface_mac;
};
struct uld_msix_bmap {
@@ -879,6 +885,7 @@ struct vf_info {
unsigned int tx_rate;
bool pf_set_mac;
u16 vlan;
+ int link_state;
};
enum {
@@ -1401,7 +1408,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
rspq_flush_handler_t flush_handler, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
- unsigned int iqid);
+ unsigned int iqid, u8 dbqt);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid);
@@ -1414,6 +1421,8 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
+int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q,
+ int maxreclaim);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
@@ -1820,6 +1829,8 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
+int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
+ u16 *dbqtimers);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_update_port_info(struct port_info *pi);
int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index b0ff9fa183f4..3130b43bba52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3143,7 +3143,7 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
- } else if (adap->flags & FW_OFLD_CONN) {
+ } else if (adap->flags & CXGB4_FW_OFLD_CONN) {
seq_printf(seq, "TID range: %u..%u/%u..%u",
t->aftid_base,
t->aftid_end,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index d07230c892a5..bec4711005cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -446,8 +446,10 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
- __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
- link_mode_mask)
+ do { \
+ __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
+ link_mode_mask); \
+ } while (0)
#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
do { \
@@ -541,7 +543,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
case FW_PORT_TYPE_CR4_QSFP:
SET_LMM(FIBRE);
FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
- FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
@@ -552,6 +554,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
break;
}
+ if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
+ FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
+ FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
+ } else {
+ SET_LMM(FEC_NONE);
+ }
+
FW_CAPS_TO_LMM(ANEG, Autoneg);
FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
@@ -679,18 +688,15 @@ static int set_link_ksettings(struct net_device *dev,
base->autoneg == AUTONEG_DISABLE) {
fw_caps = speed_to_fw_caps(base->speed);
- /* Must only specify a single speed which must be supported
- * as part of the Physical Port Capabilities.
- */
- if ((fw_caps & (fw_caps - 1)) != 0 ||
- !(lc->pcaps & fw_caps))
+ /* Speed must be supported by Physical Port Capabilities. */
+ if (!(lc->pcaps & fw_caps))
return -EINVAL;
lc->speed_caps = fw_caps;
lc->acaps = fw_caps;
} else {
fw_caps =
- lmm_to_fw_caps(link_ksettings->link_modes.advertising);
+ lmm_to_fw_caps(link_ksettings->link_modes.advertising);
if (!(lc->pcaps & fw_caps))
return -EINVAL;
lc->speed_caps = 0;
@@ -869,7 +875,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL;
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
return -EBUSY;
for (i = 0; i < pi->nqsets; ++i) {
@@ -926,11 +932,190 @@ static int get_adaptive_rx_setting(struct net_device *dev)
return q->rspq.adaptive_rx;
}
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+/* Return the current global Adapter SGE Doorbell Queue Timer Tick for all
+ * Ethernet TX Queues.
+ */
+static int get_dbqtimer_tick(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ return adap->sge.dbqtimer_tick;
+}
+
+/* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
+ * associated with a Network Device.
+ */
+static int get_dbqtimer(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_txq *txq;
+
+ txq = &adap->sge.ethtxq[pi->first_qset];
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* all of the TX Queues use the same Timer Index */
+ return adap->sge.dbqtimer_val[txq->dbqtimerix];
+}
+
+/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
+ * Queues. This is the fundamental "Tick" that sets the scale of values which
+ * can be used. Individual Ethernet TX Queues index into a relatively small
+ * array of Tick Multipliers. Changing the base Tick will thus change all of
+ * the resulting Timer Values associated with those multipliers for all
+ * Ethernet TX Queues.
+ */
+static int set_dbqtimer_tick(struct net_device *dev, int usecs)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ u32 param, val;
+ int ret;
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* return early if it's the same Timer Tick we're already using */
+ if (s->dbqtimer_tick == usecs)
+ return 0;
+
+ /* attempt to set the new Timer Tick value */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
+ val = usecs;
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+ if (ret)
+ return ret;
+ s->dbqtimer_tick = usecs;
+
+ /* if successful, reread resulting dependent Timer values */
+ ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
+ s->dbqtimer_val);
+ return ret;
+}
+
+/* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues
+ * associated with a Network Device. There is a relatively small array of
+ * possible Timer Values so we need to pick the closest value available.
+ */
+static int set_dbqtimer(struct net_device *dev, int usecs)
+{
+ int qix, timerix, min_timerix, delta, min_delta;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ struct sge_eth_txq *txq;
+ u32 param, val;
+ int ret;
+
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* Find the SGE Doorbell Timer Value that's closest to the requested
+ * value.
+ */
+ min_delta = INT_MAX;
+ min_timerix = 0;
+ for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
+ delta = s->dbqtimer_val[timerix] - usecs;
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ min_timerix = timerix;
+ }
+ }
+
+ /* Return early if it's the same Timer Index we're already using.
+ * We use the same Timer Index for all of the TX Queues for an
+ * interface so it's only necessary to check the first one.
+ */
+ txq = &s->ethtxq[pi->first_qset];
+ if (txq->dbqtimerix == min_timerix)
+ return 0;
+
+ for (qix = 0; qix < pi->nqsets; qix++, txq++) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
+ param =
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
+ FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
+ val = min_timerix;
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
+ 1, &param, &val);
+ if (ret)
+ return ret;
+ }
+ txq->dbqtimerix = min_timerix;
+ }
+ return 0;
+}
+
+/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX
+ * Queues and the Timer Value for the Ethernet TX Queues associated with a
+ * Network Device. Since changing the global Tick changes all of the
+ * available Timer Values, we need to do this first before selecting the
+ * resulting closest Timer Value. Moreover, since the Tick is global,
+ * changing it affects the Timer Values for all Network Devices on the
+ * adapter. So, before changing the Tick, we grab all of the current Timer
+ * Values for other Network Devices on this Adapter and then attempt to select
+ * new Timer Values which are close to the old values ...
+ */
+static int set_dbqtimer_tickval(struct net_device *dev,
+ int tick_usecs, int timer_usecs)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int timer[MAX_NPORTS];
+ unsigned int port;
+ int ret;
+
+ /* Grab the other adapter Network Interface current timers and fill in
+ * the new one for this Network Interface.
+ */
+ for_each_port(adap, port)
+ if (port == pi->port_id)
+ timer[port] = timer_usecs;
+ else
+ timer[port] = get_dbqtimer(adap->port[port]);
+
+ /* Change the global Tick first ... */
+ ret = set_dbqtimer_tick(dev, tick_usecs);
+ if (ret)
+ return ret;
+
+ /* ... and then set all of the Network Interface Timer Values ... */
+ for_each_port(adap, port) {
+ ret = set_dbqtimer(adap->port[port], timer[port]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
{
- set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
- return set_rx_intr_params(dev, c->rx_coalesce_usecs,
- c->rx_max_coalesced_frames);
+ int ret;
+
+ set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
+
+ ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
+ coalesce->rx_max_coalesced_frames);
+ if (ret)
+ return ret;
+
+ return set_dbqtimer_tickval(dev,
+ coalesce->tx_coalesce_usecs_irq,
+ coalesce->tx_coalesce_usecs);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -943,6 +1128,8 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
adap->sge.counter_val[rq->pktcnt_idx] : 0;
c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
+ c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
+ c->tx_coalesce_usecs = get_dbqtimer(dev);
return 0;
}
@@ -1076,7 +1263,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
* firmware image otherwise we'll try to do the entire job from the
* host ... and we always "force" the operation in this path.
*/
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
mbox = adap->mbox;
ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
@@ -1155,7 +1342,7 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
return 0;
/* Interface must be brought up atleast once */
- if (pi->adapter->flags & FULL_INIT_DONE) {
+ if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i];
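
The new tx coalescing hooks above map ethtool's tx-usecs-irq parameter to the global SGE Doorbell Queue Timer Tick and tx-usecs to the per-queue timer value (for example via ethtool -C <iface> tx-usecs-irq <tick> tx-usecs <usecs>). Because only a small table of timer values is supported, set_dbqtimer() picks the closest entry; the selection boils down to the following standalone sketch (illustrative only, not driver code):

	#include <limits.h>

	/* Return the index of the supported timer value closest to the
	 * requested microsecond target; ties resolve to the lower index.
	 */
	static int closest_dbqtimer_index(const unsigned short *timer_vals,
					  int ntimers, int usecs)
	{
		int i, delta, min_delta = INT_MAX, min_idx = 0;

		for (i = 0; i < ntimers; i++) {
			delta = timer_vals[i] - usecs;
			if (delta < 0)
				delta = -delta;
			if (delta < min_delta) {
				min_delta = delta;
				min_idx = i;
			}
		}
		return min_idx;
	}

	/* Example: with supported values {5, 10, 20, 50, 100, 200}, a request
	 * for 80 usecs selects index 4 (100 usecs).
	 */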
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
index 6c8a62eefe51..33b2c0c45509 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -74,7 +74,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
if (is_t4(adap->params.chip))
return -EINVAL;
- if (!(adap->flags & FULL_INIT_DONE))
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
return -EINVAL;
dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 7dddb9e748b8..5afb43000049 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -524,7 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
return -ENOMEM;
fwr = __skb_put(skb, len);
- t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
+ t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1
: adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -1569,7 +1569,7 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id,
int ret;
/* If we are shutting down the adapter do not wait for completion */
- if (netdev2adap(dev)->flags & SHUTTING_DOWN)
+ if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
return __cxgb4_del_filter(dev, filter_id, fs, NULL);
init_completion(&ctx.completion);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6ba9099ca7fe..89179e316687 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -433,6 +433,60 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
}
/**
+ * cxgb4_change_mac - Update match filter for a MAC address.
+ * @pi: the port_info
+ * @viid: the VI id
+ * @tcam_idx: TCAM index of existing filter for old value of MAC address,
+ * or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an MPS filter and sets it to the new MAC address if
+ * @tcam_idx >= 0, or adds the MAC address to a new filter if
+ * @tcam_idx < 0. In the latter case the address is added persistently
+ * if @persist is %true.
+ * Addresses are programmed to hash region, if tcam runs out of entries.
+ *
+ */
+static int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
+ int *tcam_idx, const u8 *addr, bool persist,
+ u8 *smt_idx)
+{
+ struct adapter *adapter = pi->adapter;
+ struct hash_mac_addr *entry, *new_entry;
+ int ret;
+
+ ret = t4_change_mac(adapter, adapter->mbox, viid,
+ *tcam_idx, addr, persist, smt_idx);
+ /* We ran out of TCAM entries. try programming hash region. */
+ if (ret == -ENOMEM) {
+ /* If the MAC address to be updated is in the hash addr
+ * list, update it from the list
+ */
+ list_for_each_entry(entry, &adapter->mac_hlist, list) {
+ if (entry->iface_mac) {
+ ether_addr_copy(entry->addr, addr);
+ goto set_hash;
+ }
+ }
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+ ether_addr_copy(new_entry->addr, addr);
+ new_entry->iface_mac = true;
+ list_add_tail(&new_entry->list, &adapter->mac_hlist);
+set_hash:
+ ret = cxgb4_set_addr_hash(pi);
+ } else if (ret >= 0) {
+ *tcam_idx = ret;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
* link_start - enable a port
* @dev: the port to enable
*
@@ -450,15 +504,9 @@ static int link_start(struct net_device *dev)
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
- if (ret == 0) {
- ret = t4_change_mac(pi->adapter, mb, pi->viid,
- pi->xact_addr_filt, dev->dev_addr, true,
- &pi->smt_idx);
- if (ret >= 0) {
- pi->xact_addr_filt = ret;
- ret = 0;
- }
- }
+ if (ret == 0)
+ ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
+ dev->dev_addr, true, &pi->smt_idx);
if (ret == 0)
ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
@@ -527,7 +575,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
- netif_tx_wake_queue(eq->txq);
+ t4_sge_eth_txq_egress_update(q->adap, eq, -1);
} else {
struct sge_uld_txq *oq;
@@ -603,12 +651,12 @@ out:
static void disable_msi(struct adapter *adapter)
{
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4_USING_MSIX;
+ } else if (adapter->flags & CXGB4_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4_USING_MSI;
}
}
@@ -624,7 +672,7 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
adap->swintr = 1;
t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
}
- if (adap->flags & MASTER_PF)
+ if (adap->flags & CXGB4_MASTER_PF)
t4_slow_intr_handler(adap);
return IRQ_HANDLED;
}
@@ -789,9 +837,9 @@ static void quiesce_rx(struct adapter *adap)
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
- if (adap->flags & FULL_INIT_DONE) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
t4_intr_disable(adap);
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
free_msix_queue_irqs(adap);
free_irq(adap->msix_info[0].vec, adap);
} else {
@@ -832,7 +880,7 @@ static int setup_fw_sge_queues(struct adapter *adap)
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
@@ -885,10 +933,13 @@ static int setup_sge_queues(struct adapter *adap)
q->rspq.idx = j;
memset(&q->stats, 0, sizeof(q->stats));
}
- for (j = 0; j < pi->nqsets; j++, t++) {
+
+ q = &s->ethrxq[pi->first_qset];
+ for (j = 0; j < pi->nqsets; j++, t++, q++) {
err = t4_sge_alloc_eth_txq(adap, t, dev,
netdev_get_tx_queue(dev, j),
- s->fw_evtq.cntxt_id);
+ q->rspq.cntxt_id,
+ !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
if (err)
goto freeout;
}
@@ -910,7 +961,7 @@ static int setup_sge_queues(struct adapter *adap)
if (!is_t4(adap->params.chip)) {
err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
netdev_get_tx_queue(adap->port[0], 0)
- , s->fw_evtq.cntxt_id);
+ , s->fw_evtq.cntxt_id, false);
if (err)
goto freeout;
}
@@ -2229,7 +2280,7 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto freeq;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
@@ -2242,7 +2293,8 @@ static int cxgb_up(struct adapter *adap)
}
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
- (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ (adap->flags & CXGB4_USING_MSI) ? 0
+ : IRQF_SHARED,
adap->port[0]->name, adap);
if (err)
goto irq_err;
@@ -2251,7 +2303,7 @@ static int cxgb_up(struct adapter *adap)
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
- adap->flags |= FULL_INIT_DONE;
+ adap->flags |= CXGB4_FULL_INIT_DONE;
mutex_unlock(&uld_mutex);
notify_ulds(adap, CXGB4_STATE_UP);
@@ -2280,7 +2332,7 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
- adapter->flags &= ~FULL_INIT_DONE;
+ adapter->flags &= ~CXGB4_FULL_INIT_DONE;
}
/*
@@ -2294,7 +2346,7 @@ static int cxgb_open(struct net_device *dev)
netif_carrier_off(dev);
- if (!(adapter->flags & FULL_INIT_DONE)) {
+ if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
err = cxgb_up(adapter);
if (err < 0)
return err;
@@ -2689,6 +2741,7 @@ static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
ivi->min_tx_rate = 0;
ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
ivi->vlan = vfinfo->vlan;
+ ivi->linkstate = vfinfo->link_state;
return 0;
}
@@ -2828,6 +2881,49 @@ static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
return ret;
}
+
+static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
+ int link)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ u32 param, val;
+ int ret = 0;
+
+ if (vf >= adap->num_vfs)
+ return -EINVAL;
+
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ val = FW_VF_LINK_STATE_AUTO;
+ break;
+
+ case IFLA_VF_LINK_STATE_ENABLE:
+ val = FW_VF_LINK_STATE_ENABLE;
+ break;
+
+ case IFLA_VF_LINK_STATE_DISABLE:
+ val = FW_VF_LINK_STATE_DISABLE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
+ &param, &val);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Error %d in setting PF %d VF %d link state\n",
+ ret, adap->pf, vf);
+ return -EINVAL;
+ }
+
+ adap->vfinfo[vf].link_state = link;
+ return ret;
+}
#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2839,9 +2935,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
- pi->xact_addr_filt, addr->sa_data, true,
- &pi->smt_idx);
+ ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt,
+ addr->sa_data, true, &pi->smt_idx);
if (ret < 0)
return ret;
@@ -2856,7 +2951,7 @@ static void cxgb_netpoll(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
int i;
struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
@@ -2883,7 +2978,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
if (index < 0 || index > pi->nqsets - 1)
return -EINVAL;
- if (!(adap->flags & FULL_INIT_DONE)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to rate limit on queue %d. Link Down?\n",
index);
@@ -2984,7 +3079,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- if (!(adap->flags & FULL_INIT_DONE)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to setup tc on port %d. Link Down?\n",
pi->port_id);
@@ -3244,12 +3339,13 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = cxgb4_mgmt_open,
- .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
- .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
- .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
- .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
- .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
+ .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
};
#endif
@@ -4115,7 +4211,7 @@ static int adap_init0(struct adapter *adap)
return ret;
}
if (ret == adap->mbox)
- adap->flags |= MASTER_PF;
+ adap->flags |= CXGB4_MASTER_PF;
/*
* If we're the Master PF Driver and the device is uninitialized,
@@ -4130,7 +4226,7 @@ static int adap_init0(struct adapter *adap)
/* If firmware is too old (not supported by driver) force an update. */
if (ret)
state = DEV_STATE_UNINIT;
- if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
+ if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
struct fw_info *fw_info;
struct fw_hdr *card_fw;
const struct firmware *fw;
@@ -4192,7 +4288,7 @@ static int adap_init0(struct adapter *adap)
ret);
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
- adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+ adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
@@ -4278,6 +4374,24 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
+ /* Grab the SGE Doorbell Queue Timer values. If successful, that
+ * indicates that the Firmware and Hardware support this.
+ */
+ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+
+ if (!ret) {
+ adap->sge.dbqtimer_tick = val[0];
+ ret = t4_read_sge_dbqtimers(adap,
+ ARRAY_SIZE(adap->sge.dbqtimer_val),
+ adap->sge.dbqtimer_val);
+ }
+
+ if (!ret)
+ adap->flags |= CXGB4_SGE_DBQ_TIMER;
+
if (is_bypass_device(adap->pdev->device))
adap->params.bypass = 1;
@@ -4400,7 +4514,7 @@ static int adap_init0(struct adapter *adap)
* offload connection through firmware work request
*/
if ((val[0] != val[1]) && (ret >= 0)) {
- adap->flags |= FW_OFLD_CONN;
+ adap->flags |= CXGB4_FW_OFLD_CONN;
adap->tids.aftid_base = val[0];
adap->tids.aftid_end = val[1];
}
@@ -4493,7 +4607,7 @@ static int adap_init0(struct adapter *adap)
* 2. Server filter: This are special filters which are used
* to redirect SYN packets to offload queue.
*/
- if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
+ if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
adap->tids.sftid_base = adap->tids.ftid_base +
DIV_ROUND_UP(adap->tids.nftids, 3);
adap->tids.nsftids = adap->tids.nftids -
@@ -4672,7 +4786,7 @@ static int adap_init0(struct adapter *adap)
adap->params.b_wnd);
}
t4_init_sge_params(adap);
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
t4_init_tp_params(adap, true);
return 0;
@@ -4707,7 +4821,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
goto out;
rtnl_lock();
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
spin_lock(&adap->stats_lock);
for_each_port(adap, i) {
@@ -4719,12 +4833,12 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
}
spin_unlock(&adap->stats_lock);
disable_interrupts(adap);
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
- if ((adap->flags & DEV_ENABLED)) {
+ if ((adap->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
- adap->flags &= ~DEV_ENABLED;
+ adap->flags &= ~CXGB4_DEV_ENABLED;
}
out: return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -4742,13 +4856,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
- if (!(adap->flags & DEV_ENABLED)) {
+ if (!(adap->flags & CXGB4_DEV_ENABLED)) {
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Cannot reenable PCI "
"device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- adap->flags |= DEV_ENABLED;
+ adap->flags |= CXGB4_DEV_ENABLED;
}
pci_set_master(pdev);
@@ -4759,7 +4873,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
if (adap_init1(adap, &c))
return PCI_ERS_RESULT_DISCONNECT;
@@ -4871,7 +4985,7 @@ static int cfg_queues(struct adapter *adap)
* at all is problematic ...
*/
niqflint = adap->params.pfres.niqflint - 1;
- if (!(adap->flags & USING_MSIX))
+ if (!(adap->flags & CXGB4_USING_MSIX))
niqflint--;
neq = adap->params.pfres.neq / 2;
avail_eth_qsets = min(niqflint, neq);
@@ -5153,8 +5267,8 @@ static void print_adapter_info(struct adapter *adapter)
/* Software/Hardware configuration */
dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
is_offload(adapter) ? "R" : "",
- ((adapter->flags & USING_MSIX) ? "MSI-X" :
- (adapter->flags & USING_MSI) ? "MSI" : ""),
+ ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
+ (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
is_offload(adapter) ? "Offload" : "non-Offload");
}
@@ -5229,13 +5343,13 @@ static void free_some_resources(struct adapter *adapter)
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
- if (adapter->flags & FW_OK)
+ if (adapter->flags & CXGB4_FW_OK)
t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
- NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int t4_get_chip_type(struct adapter *adap, int ver)
@@ -5533,7 +5647,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* PCI device has been enabled */
- adapter->flags |= DEV_ENABLED;
+ adapter->flags |= CXGB4_DEV_ENABLED;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5551,7 +5665,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* using Relaxed Ordering.
*/
if (!pcie_relaxed_ordering_enabled(pdev))
- adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+ adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
@@ -5642,7 +5756,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
@@ -5651,9 +5765,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM |
NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_HW_TLS_RECORD;
}
if (highdma)
@@ -5680,7 +5797,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
- if (adapter->flags & FW_OK) {
+ if (adapter->flags & CXGB4_FW_OK) {
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
@@ -5702,7 +5819,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- if (!(adapter->flags & FW_OK))
+ if (!(adapter->flags & CXGB4_FW_OK))
goto fw_attach_fail;
/* Configure queues and allocate tables now, they can be needed as
@@ -5796,9 +5913,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
- adapter->flags |= USING_MSIX;
+ adapter->flags |= CXGB4_USING_MSIX;
else if (msi > 0 && pci_enable_msi(pdev) == 0) {
- adapter->flags |= USING_MSI;
+ adapter->flags |= CXGB4_USING_MSI;
if (msi > 1)
free_msix_info(adapter);
}
@@ -5866,7 +5983,7 @@ fw_attach_fail:
cxgb4_ptp_init(adapter);
if (IS_REACHABLE(CONFIG_THERMAL) &&
- !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
+ !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
cxgb4_thermal_init(adapter);
print_adapter_info(adapter);
@@ -5875,7 +5992,7 @@ fw_attach_fail:
out_free_dev:
t4_free_sge_resources(adapter);
free_some_resources(adapter);
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4_USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
@@ -5908,7 +6025,7 @@ static void remove_one(struct pci_dev *pdev)
return;
}
- adapter->flags |= SHUTTING_DOWN;
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
int i;
@@ -5943,10 +6060,10 @@ static void remove_one(struct pci_dev *pdev)
*/
clear_all_filters(adapter);
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adapter);
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4_USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
@@ -5970,9 +6087,9 @@ static void remove_one(struct pci_dev *pdev)
#endif
iounmap(adapter->regs);
pci_disable_pcie_error_reporting(pdev);
- if ((adapter->flags & DEV_ENABLED)) {
+ if ((adapter->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
- adapter->flags &= ~DEV_ENABLED;
+ adapter->flags &= ~CXGB4_DEV_ENABLED;
}
pci_release_regions(pdev);
kfree(adapter->mbox_log);
@@ -5998,7 +6115,7 @@ static void shutdown_one(struct pci_dev *pdev)
return;
}
- adapter->flags |= SHUTTING_DOWN;
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
int i;
@@ -6016,7 +6133,7 @@ static void shutdown_one(struct pci_dev *pdev)
disable_msi(adapter);
t4_sge_stop(adapter);
- if (adapter->flags & FW_OK)
+ if (adapter->flags & CXGB4_FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index c116f96956fe..82a8d1970060 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -83,28 +83,23 @@ static void cxgb4_process_flow_match(struct net_device *dev,
struct tc_cls_flower_offload *cls,
struct ch_filter_specification *fs)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
u16 addr_type = 0;
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- cls->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->mask);
- u16 ethtype_key = ntohs(key->n_proto);
- u16 ethtype_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+ u16 ethtype_key, ethtype_mask;
+
+ flow_rule_match_basic(rule, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
if (ethtype_key == ETH_P_ALL) {
ethtype_key = 0;
@@ -116,115 +111,89 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->val.ethtype = ethtype_key;
fs->mask.ethtype = ethtype_mask;
- fs->val.proto = key->ip_proto;
- fs->mask.proto = mask->ip_proto;
+ fs->val.proto = match.key->ip_proto;
+ fs->mask.proto = match.mask->ip_proto;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- cls->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- cls->mask);
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
fs->type = 0;
- memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
- memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
- memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
- memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
+ memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
+ memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
+ memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));
/* also initialize nat_lip/fip to same values */
- memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
- memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
-
+ memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
+ memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- cls->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- cls->mask);
+ struct flow_match_ipv6_addrs match;
+ flow_rule_match_ipv6_addrs(rule, &match);
fs->type = 1;
- memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
- memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
- memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
- memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
+ memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
+ sizeof(match.key->dst));
+ memcpy(&fs->val.fip[0], match.key->src.s6_addr,
+ sizeof(match.key->src));
+ memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
+ sizeof(match.mask->dst));
+ memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
+ sizeof(match.mask->src));
/* also initialize nat_lip/fip to same values */
- memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
- memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
+ memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
+ sizeof(match.key->dst));
+ memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
+ sizeof(match.key->src));
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- cls->mask);
- fs->val.lport = cpu_to_be16(key->dst);
- fs->mask.lport = cpu_to_be16(mask->dst);
- fs->val.fport = cpu_to_be16(key->src);
- fs->mask.fport = cpu_to_be16(mask->src);
+ flow_rule_match_ports(rule, &match);
+ fs->val.lport = cpu_to_be16(match.key->dst);
+ fs->mask.lport = cpu_to_be16(match.mask->dst);
+ fs->val.fport = cpu_to_be16(match.key->src);
+ fs->mask.fport = cpu_to_be16(match.mask->src);
/* also initialize nat_lport/fport to same values */
- fs->nat_lport = cpu_to_be16(key->dst);
- fs->nat_fport = cpu_to_be16(key->src);
+ fs->nat_lport = cpu_to_be16(match.key->dst);
+ fs->nat_fport = cpu_to_be16(match.key->src);
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *key, *mask;
-
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->mask);
- fs->val.tos = key->tos;
- fs->mask.tos = mask->tos;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ fs->val.tos = match.key->tos;
+ fs->mask.tos = match.mask->tos;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key, *mask;
-
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- cls->mask);
- fs->val.vni = be32_to_cpu(key->keyid);
- fs->mask.vni = be32_to_cpu(mask->keyid);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+
+ flow_rule_match_enc_keyid(rule, &match);
+ fs->val.vni = be32_to_cpu(match.key->keyid);
+ fs->mask.vni = be32_to_cpu(match.mask->keyid);
if (fs->mask.vni) {
fs->val.encap_vld = 1;
fs->mask.encap_vld = 1;
}
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
u16 vlan_tci, vlan_tci_mask;
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- cls->mask);
- vlan_tci = key->vlan_id | (key->vlan_priority <<
- VLAN_PRIO_SHIFT);
- vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
- VLAN_PRIO_SHIFT);
+ flow_rule_match_vlan(rule, &match);
+ vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT);
+ vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
+ VLAN_PRIO_SHIFT);
fs->val.ivlan = vlan_tci;
fs->mask.ivlan = vlan_tci_mask;
@@ -255,10 +224,12 @@ static void cxgb4_process_flow_match(struct net_device *dev,
static int cxgb4_validate_flow_match(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 ethtype_mask = 0;
u16 ethtype_key = 0;
- if (cls->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -268,36 +239,29 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(dev, "Unsupported key used: 0x%x\n",
- cls->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->mask);
- ethtype_key = ntohs(key->n_proto);
- ethtype_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
u16 eth_ip_type = ethtype_key & ethtype_mask;
- struct flow_dissector_key_ip *mask;
+ struct flow_match_ip match;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
netdev_err(dev, "IP Key supported only with IPv4/v6");
return -EINVAL;
}
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->mask);
- if (mask->ttl) {
+ flow_rule_match_ip(rule, &match);
+ if (match.mask->ttl) {
netdev_warn(dev, "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
@@ -328,7 +292,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
u32 mask, u32 offset, u8 htype)
{
switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
switch (offset) {
case PEDIT_ETH_DMAC_31_0:
fs->newdmac = 1;
@@ -346,7 +310,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
offload_pedit(fs, val, mask, ETH_SMAC_47_16);
}
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
@@ -356,7 +320,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
@@ -384,7 +348,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -397,7 +361,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -416,56 +380,63 @@ static void cxgb4_process_flow_actions(struct net_device *in,
struct tc_cls_flower_offload *cls,
struct ch_filter_specification *fs)
{
- const struct tc_action *a;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_action_entry *act;
int i;
- tcf_exts_for_each_action(i, a, cls->exts) {
- if (is_tcf_gact_ok(a)) {
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS;
- } else if (is_tcf_gact_shot(a)) {
+ break;
+ case FLOW_ACTION_DROP:
fs->action = FILTER_DROP;
- } else if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *out = tcf_mirred_dev(a);
+ break;
+ case FLOW_ACTION_REDIRECT: {
+ struct net_device *out = act->dev;
struct port_info *pi = netdev_priv(out);
fs->action = FILTER_SWITCH;
fs->eport = pi->port_id;
- } else if (is_tcf_vlan(a)) {
- u32 vlan_action = tcf_vlan_action(a);
- u8 prio = tcf_vlan_push_prio(a);
- u16 vid = tcf_vlan_push_vid(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u8 prio = act->vlan.prio;
+ u16 vid = act->vlan.vid;
u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;
-
- switch (vlan_action) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
fs->newvlan |= VLAN_REMOVE;
break;
- case TCA_VLAN_ACT_PUSH:
+ case FLOW_ACTION_VLAN_PUSH:
fs->newvlan |= VLAN_INSERT;
fs->vlan = vlan_tci;
break;
- case TCA_VLAN_ACT_MODIFY:
+ case FLOW_ACTION_VLAN_MANGLE:
fs->newvlan |= VLAN_REWRITE;
fs->vlan = vlan_tci;
break;
default:
break;
}
- } else if (is_tcf_pedit(a)) {
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
u32 mask, val, offset;
- int nkeys, i;
u8 htype;
- nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- mask = tcf_pedit_mask(a, i);
- val = tcf_pedit_val(a, i);
- offset = tcf_pedit_offset(a, i);
+ htype = act->mangle.htype;
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
- process_pedit_field(fs, val, mask, offset,
- htype);
+ process_pedit_field(fs, val, mask, offset, htype);
}
+ break;
+ default:
+ break;
}
}
}
@@ -484,101 +455,89 @@ static bool valid_l4_mask(u32 mask)
}
static bool valid_pedit_action(struct net_device *dev,
- const struct tc_action *a)
+ const struct flow_action_entry *act)
{
u32 mask, offset;
- u8 cmd, htype;
- int nkeys, i;
-
- nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- cmd = tcf_pedit_cmd(a, i);
- mask = tcf_pedit_mask(a, i);
- offset = tcf_pedit_offset(a, i);
-
- if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
- netdev_err(dev, "%s: Unsupported pedit cmd\n",
+ u8 htype;
+
+ htype = act->mangle.htype;
+ mask = act->mangle.mask;
+ offset = act->mangle.offset;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ switch (offset) {
+ case PEDIT_ETH_DMAC_31_0:
+ case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
+ case PEDIT_ETH_SMAC_47_16:
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- switch (offset) {
- case PEDIT_ETH_DMAC_31_0:
- case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
- case PEDIT_ETH_SMAC_47_16:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- switch (offset) {
- case PEDIT_IP4_SRC:
- case PEDIT_IP4_DST:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (offset) {
+ case PEDIT_IP4_SRC:
+ case PEDIT_IP4_DST:
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- switch (offset) {
- case PEDIT_IP6_SRC_31_0:
- case PEDIT_IP6_SRC_63_32:
- case PEDIT_IP6_SRC_95_64:
- case PEDIT_IP6_SRC_127_96:
- case PEDIT_IP6_DST_31_0:
- case PEDIT_IP6_DST_63_32:
- case PEDIT_IP6_DST_95_64:
- case PEDIT_IP6_DST_127_96:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (offset) {
+ case PEDIT_IP6_SRC_31_0:
+ case PEDIT_IP6_SRC_63_32:
+ case PEDIT_IP6_SRC_95_64:
+ case PEDIT_IP6_SRC_127_96:
+ case PEDIT_IP6_DST_31_0:
+ case PEDIT_IP6_DST_63_32:
+ case PEDIT_IP6_DST_95_64:
+ case PEDIT_IP6_DST_127_96:
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- switch (offset) {
- case PEDIT_TCP_SPORT_DPORT:
- if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
- __func__);
- return false;
- }
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ switch (offset) {
+ case PEDIT_TCP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
__func__);
return false;
}
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- switch (offset) {
- case PEDIT_UDP_SPORT_DPORT:
- if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
- __func__);
- return false;
- }
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ switch (offset) {
+ case PEDIT_UDP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
__func__);
return false;
}
break;
default:
- netdev_err(dev, "%s: Unsupported pedit type\n",
+ netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+ return false;
}
return true;
}
@@ -586,24 +545,26 @@ static bool valid_pedit_action(struct net_device *dev,
static int cxgb4_validate_flow_actions(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
- const struct tc_action *a;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
int i;
- tcf_exts_for_each_action(i, a, cls->exts) {
- if (is_tcf_gact_ok(a)) {
- /* Do nothing */
- } else if (is_tcf_gact_shot(a)) {
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ case FLOW_ACTION_DROP:
/* Do nothing */
- } else if (is_tcf_mirred_egress_redirect(a)) {
+ break;
+ case FLOW_ACTION_REDIRECT: {
struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
unsigned int i;
bool found = false;
- target_dev = tcf_mirred_dev(a);
+ target_dev = act->dev;
for_each_port(adap, i) {
n_dev = adap->port[i];
if (target_dev == n_dev) {
@@ -621,15 +582,18 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
return -EINVAL;
}
act_redir = true;
- } else if (is_tcf_vlan(a)) {
- u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
- u32 vlan_action = tcf_vlan_action(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u16 proto = be16_to_cpu(act->vlan.proto);
- switch (vlan_action) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
break;
- case TCA_VLAN_ACT_PUSH:
- case TCA_VLAN_ACT_MODIFY:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
if (proto != ETH_P_8021Q) {
netdev_err(dev, "%s: Unsupported vlan proto\n",
__func__);
@@ -642,13 +606,17 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
return -EOPNOTSUPP;
}
act_vlan = true;
- } else if (is_tcf_pedit(a)) {
- bool pedit_valid = valid_pedit_action(dev, a);
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
+ bool pedit_valid = valid_pedit_action(dev, act);
if (!pedit_valid)
return -EOPNOTSUPP;
act_pedit = true;
- } else {
+ }
+ break;
+ default:
netdev_err(dev, "%s: Unsupported action\n", __func__);
return -EOPNOTSUPP;
}
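
The conversion above moves the action walk from the old tc_action helpers to the flow_offload API: the rule's actions are iterated with flow_action_for_each() and dispatched with a switch on act->id, with the per-action data (act->dev, act->vlan.proto, the mangle fields) carried in the entry itself. Below is a minimal user-space sketch of that tagged-union iteration pattern; the demo_* types, values and helpers are illustrative stand-ins, not the kernel's struct flow_action.

#include <stdio.h>

enum demo_action_id {
        DEMO_ACTION_ACCEPT,
        DEMO_ACTION_DROP,
        DEMO_ACTION_REDIRECT,
        DEMO_ACTION_VLAN_PUSH,
};

struct demo_action_entry {
        enum demo_action_id id;
        union {
                const char *dev;                /* DEMO_ACTION_REDIRECT */
                struct {
                        unsigned short proto;   /* DEMO_ACTION_VLAN_PUSH */
                        unsigned short vid;
                } vlan;
        };
};

#define demo_action_for_each(i, act, actions, n) \
        for ((i) = 0, (act) = (actions); (i) < (n); (i)++, (act)++)

static int demo_validate(const struct demo_action_entry *actions, int n)
{
        const struct demo_action_entry *act;
        int i;

        demo_action_for_each(i, act, actions, n) {
                switch (act->id) {
                case DEMO_ACTION_ACCEPT:
                case DEMO_ACTION_DROP:
                        break;                  /* nothing to check */
                case DEMO_ACTION_REDIRECT:
                        printf("redirect to %s\n", act->dev);
                        break;
                case DEMO_ACTION_VLAN_PUSH:
                        if (act->vlan.proto != 0x8100)  /* only 802.1Q here */
                                return -1;
                        break;
                default:
                        return -1;              /* unsupported action */
                }
        }
        return 0;
}

int main(void)
{
        struct demo_action_entry acts[] = {
                { .id = DEMO_ACTION_REDIRECT, .dev = "eth1" },
                { .id = DEMO_ACTION_VLAN_PUSH, .vlan = { 0x8100, 10 } },
                { .id = DEMO_ACTION_ACCEPT },
        };

        return demo_validate(acts, 3) ? 1 : 0;
}
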
@@ -843,9 +811,9 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
if (ofld_stats->packet_count != packets) {
if (ofld_stats->prev_packet_count != packets)
ofld_stats->last_used = jiffies;
- tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
- packets - ofld_stats->packet_count,
- ofld_stats->last_used);
+ flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
+ packets - ofld_stats->packet_count,
+ ofld_stats->last_used);
ofld_stats->packet_count = packets;
ofld_stats->byte_count = bytes;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index c7d2b4dc7568..02fc63fa7f25 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -444,8 +444,7 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
if (!max_tids)
return NULL;
- t = kvzalloc(sizeof(*t) +
- (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
+ t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
if (!t)
return NULL;
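
Several allocation sites in this patch (clip_tbl.c, cxgb4_tc_u32.c, l2t.c, sched.c, smt.c) replace the open-coded "sizeof(*p) + n * sizeof(element)" with struct_size() from <linux/overflow.h>, which yields the same total for a trailing flexible-array member but guards the multiplication against overflow. A small user-space sketch of the equivalent arithmetic follows; the struct and helper names are invented for illustration, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_table {
        unsigned int size;
        uint64_t entries[];             /* trailing flexible-array member */
};

/* sizeof(*t) + n * sizeof(t->entries[0]), saturating to SIZE_MAX instead of
 * silently wrapping on overflow -- the property the struct_size() conversion
 * buys over the old open-coded arithmetic.
 */
static size_t demo_struct_size(size_t n)
{
        size_t elem = sizeof(((struct demo_table *)0)->entries[0]);

        if (n > (SIZE_MAX - sizeof(struct demo_table)) / elem)
                return SIZE_MAX;
        return sizeof(struct demo_table) + n * elem;
}

int main(void)
{
        size_t bytes = demo_struct_size(512);
        struct demo_table *t = malloc(bytes);

        if (!t)
                return 1;
        t->size = 512;
        printf("512 entries need %zu bytes\n", bytes);
        printf("overflow request saturates to %zu\n",
               demo_struct_size(SIZE_MAX / 2));
        free(t);
        return 0;
}
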
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index b3654598a2d5..6c685b920713 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -147,7 +147,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
per_chan = rxq_info->nrxq / adap->params.nports;
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
msi_idx = 1;
else
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -195,7 +195,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
sizeof(unsigned short),
GFP_KERNEL);
@@ -206,7 +206,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
- if (adap->flags & FULL_INIT_DONE &&
+ if (adap->flags & CXGB4_FULL_INIT_DONE &&
!ret && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
unsigned int cmplqid;
@@ -239,7 +239,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
u32 param, cmdop, cmplqid = 0;
int i;
@@ -258,7 +258,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
kfree(rxq_info->msix_tbl);
}
@@ -273,7 +273,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!rxq_info)
return -ENOMEM;
- if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
i = s->nqs_per_uld;
rxq_info->nrxq = roundup(i, adap->params.nports);
} else {
@@ -284,7 +284,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!uld_info->ciq) {
rxq_info->nciq = 0;
} else {
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
rxq_info->nciq = min_t(int, s->nqs_per_uld,
num_online_cpus());
else
@@ -611,10 +611,10 @@ static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type
adap->uld[type].add = NULL;
release_sge_txq_uld(adap, type);
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_sge_queues_uld(adap, type);
@@ -673,7 +673,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->sge_egrstatuspagesize = adap->sge.stat_len;
lld->sge_pktshift = adap->sge.pktshift;
lld->ulp_crypto = adap->params.crypto;
- lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
lld->max_ordird_qp = adap->params.max_ordird_qp;
lld->max_ird_adapter = adap->params.max_ird_adapter;
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
@@ -702,7 +702,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
adap->uld[uld].handle = handle;
t4_register_netevent_notifier();
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
@@ -737,13 +737,13 @@ void cxgb4_register_uld(enum cxgb4_uld type,
ret = setup_sge_queues_uld(adap, type, p->lro);
if (ret)
goto free_queues;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
}
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type);
if (adap->uld[type].add)
goto free_irq;
@@ -754,9 +754,9 @@ void cxgb4_register_uld(enum cxgb4_uld type,
uld_attach(adap, type);
continue;
free_irq:
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
free_sge_queues_uld(adap, type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 4852febbfec3..1a407d3c1d67 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -646,7 +646,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
if (l2t_size < L2T_MIN_HASH_BUCKETS)
return NULL;
- d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL);
+ d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL);
if (!d)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 52edb688942b..ba6c153ee45c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -478,7 +478,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
struct sched_table *s;
unsigned int i;
- s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL);
+ s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
if (!s)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fc0bc6458e84..88773ca58e6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -80,9 +80,10 @@
* Max number of Tx descriptors we clean up at a time. Should be modest as
* freeing skbs isn't cheap and it happens while holding locks. We just need
* to free packets faster than they arrive, we eventually catch up and keep
- * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
+ * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should
+ * also match the CIDX Flush Threshold.
*/
-#define MAX_TX_RECLAIM 16
+#define MAX_TX_RECLAIM 32
/*
* Max number of Rx buffers we replenish at a time. Again keep this modest,
@@ -401,31 +402,52 @@ static inline int reclaimable(const struct sge_txq *q)
}
/**
- * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
+ * reclaim_completed_tx - reclaims completed TX Descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
+ * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
* @unmap: whether the buffers should be unmapped for DMA
*
- * Reclaims Tx descriptors that the SGE has indicated it has processed,
- * and frees the associated buffers if possible. Called with the Tx
- * queue locked.
+ * Reclaims Tx Descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. If @maxreclaim == -1, then
+ * we'll use a default maximum. Called with the TX Queue locked.
*/
-inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
- bool unmap)
+static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+ int maxreclaim, bool unmap)
{
- int avail = reclaimable(q);
+ int reclaim = reclaimable(q);
- if (avail) {
+ if (reclaim) {
/*
* Limit the amount of clean up work we do at a time to keep
* the Tx lock hold time O(1).
*/
- if (avail > MAX_TX_RECLAIM)
- avail = MAX_TX_RECLAIM;
+ if (maxreclaim < 0)
+ maxreclaim = MAX_TX_RECLAIM;
+ if (reclaim > maxreclaim)
+ reclaim = maxreclaim;
- free_tx_desc(adap, q, avail, unmap);
- q->in_use -= avail;
+ free_tx_desc(adap, q, reclaim, unmap);
+ q->in_use -= reclaim;
}
+
+ return reclaim;
+}
+
+/**
+ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adap: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue locked.
+ */
+void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+ bool unmap)
+{
+ (void)reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
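
The split above leaves cxgb4_reclaim_completed_tx() as a thin wrapper around reclaim_completed_tx(), which now takes a @maxreclaim cap (-1 selects MAX_TX_RECLAIM) so callers can bound how much cleanup happens while the Tx lock is held. A stand-alone sketch of that capping pattern, with an illustrative queue structure rather than the driver's:

#include <stdio.h>

#define DEMO_MAX_RECLAIM 32

struct demo_txq {
        int in_use;             /* descriptors currently outstanding */
        int hw_completed;       /* descriptors hardware has finished with */
};

static int demo_reclaimable(const struct demo_txq *q)
{
        return q->hw_completed;
}

static int demo_reclaim_completed(struct demo_txq *q, int maxreclaim)
{
        int reclaim = demo_reclaimable(q);

        if (maxreclaim < 0)                     /* -1 selects the default cap */
                maxreclaim = DEMO_MAX_RECLAIM;
        if (reclaim > maxreclaim)               /* keep per-call work bounded */
                reclaim = maxreclaim;

        q->in_use -= reclaim;
        q->hw_completed -= reclaim;
        return reclaim;
}

int main(void)
{
        struct demo_txq q = { .in_use = 100, .hw_completed = 80 };

        printf("freed %d\n", demo_reclaim_completed(&q, -1));  /* 32 */
        printf("freed %d\n", demo_reclaim_completed(&q, 8));   /* 8  */
        return 0;
}
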
@@ -1288,6 +1310,44 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
}
/**
+ * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
+ * @adap: the adapter
+ * @eq: the Ethernet TX Queue
+ * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
+ *
+ * We're typically called here to update the state of an Ethernet TX
+ * Queue with respect to the hardware's progress in consuming the TX
+ * Work Requests that we've put on that Egress Queue. This happens
+ * when we get Egress Queue Update messages and also prophylactically
+ * in regular timer-based Ethernet TX Queue maintenance.
+ */
+int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
+ int maxreclaim)
+{
+ struct sge_txq *q = &eq->q;
+ unsigned int reclaimed;
+
+ if (!q->in_use || !__netif_tx_trylock(eq->txq))
+ return 0;
+
+ /* Reclaim pending completed TX Descriptors. */
+ reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
+
+ /* If the TX Queue is currently stopped and there's now more than half
+	 * the queue available, restart it. Otherwise bail out since the rest
+	 * of what we want to do here concerns shipping any currently
+	 * buffered Coalesced TX Work Request.
+ */
+ if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
+ netif_tx_wake_queue(eq->txq);
+ eq->q.restarts++;
+ }
+
+ __netif_tx_unlock(eq->txq);
+ return reclaimed;
+}
+
+/**
* cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
@@ -1357,7 +1417,7 @@ out_free: dev_kfree_skb_any(skb);
}
skb_tx_timestamp(skb);
- cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
@@ -1400,8 +1460,25 @@ out_free: dev_kfree_skb_any(skb);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+
+ /* If we're using the SGE Doorbell Queue Timer facility, we
+ * don't need to ask the Firmware to send us Egress Queue CIDX
+ * Updates: the Hardware will do this automatically. And
+ * since we send the Ingress Queue CIDX Updates to the
+ * corresponding Ethernet Response Queue, we'll get them very
+ * quickly.
+ */
+ if (!q->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1671,7 +1748,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
/* Take this opportunity to reclaim any TX Descriptors whose DMA
* transfers have completed.
*/
- cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
+ reclaim_completed_tx(adapter, &txq->q, -1, true);
/* Calculate the number of flits and TX Descriptors we're going to
* need along with how many TX Descriptors will be left over after
@@ -1715,7 +1792,16 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+
+ /* If we're using the SGE Doorbell Queue Timer facility, we
+ * don't need to ask the Firmware to send us Egress Queue CIDX
+ * Updates: the Hardware will do this automatically. And
+ * since we send the Ingress Queue CIDX Updates to the
+ * corresponding Ethernet Response Queue, we'll get them very
+ * quickly.
+ */
+ if (!txq->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
@@ -2794,6 +2880,74 @@ static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
}
/**
+ * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
+ * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
+ * @rsp: Response Entry pointer into Response Queue
+ * @gl: Gather List pointer
+ *
+ * For adapters which support the SGE Doorbell Queue Timer facility,
+ * we configure the Ethernet TX Queues to send CIDX Updates to the
+ * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
+ * messages. This adds a small load to PCIe Link RX bandwidth and,
+ * potentially, higher CPU Interrupt load, but allows us to respond
+ * much more quickly to the CIDX Updates. This is important for
+ * Upper Layer Software which isn't willing to have a large amount
+ * of TX Data outstanding before receiving DMA Completions.
+ */
+static void t4_tx_completion_handler(struct sge_rspq *rspq,
+ const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+ struct port_info *pi = netdev_priv(rspq->netdev);
+ struct adapter *adapter = rspq->adap;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_txq *txq;
+
+ /* skip RSS header */
+ rsp++;
+
+ /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ if (unlikely(opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)rsp)->type ==
+ FW_TYPE_RSSCPL)) {
+ rsp++;
+ opcode = ((const struct rss_header *)rsp)->opcode;
+ rsp++;
+ }
+
+ if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
+ pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
+ __func__, opcode);
+ return;
+ }
+
+ txq = &s->ethtxq[pi->first_qset + rspq->idx];
+
+ /* We've got the Hardware Consumer Index Update in the Egress Update
+ * message. If we're using the SGE Doorbell Queue Timer mechanism,
+ * these Egress Update messages will be our sole CIDX Updates we get
+ * since we don't want to chew up PCIe bandwidth for both Ingress
+ * Messages and Status Page writes. However, the code which manages
+ * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
+ * stored in the Status Page at the end of the TX Queue. It's easiest
+ * to simply copy the CIDX Update value from the Egress Update message
+ * to the Status Page. Also note that no Endian issues need to be
+ * considered here since both are Big Endian and we're just copying
+ * bytes consistently ...
+ */
+ if (txq->dbqt) {
+ struct cpl_sge_egr_update *egr;
+
+ egr = (struct cpl_sge_egr_update *)rsp;
+ WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
+ }
+
+ t4_sge_eth_txq_egress_update(adapter, txq, -1);
+}
+
+/**
* t4_ethrx_handler - process an ingress ethernet packet
* @q: the response queue that received the packet
* @rsp: the response queue descriptor holding the RX_PKT message
@@ -2816,6 +2970,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct port_info *pi;
int ret = 0;
+ /* If we're looking at TX Queue CIDX Update, handle that separately
+ * and return.
+ */
+ if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
+ (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
+ t4_tx_completion_handler(q, rsp, si);
+ return 0;
+ }
+
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
@@ -3212,7 +3375,7 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
struct adapter *adap = cookie;
- if (adap->flags & MASTER_PF)
+ if (adap->flags & CXGB4_MASTER_PF)
t4_slow_intr_handler(adap);
process_intrq(adap);
return IRQ_HANDLED;
@@ -3228,7 +3391,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
struct adapter *adap = cookie;
t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
- if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
+ if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
process_intrq(adap))
return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */
@@ -3243,9 +3406,9 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
*/
irq_handler_t t4_intr_handler(struct adapter *adap)
{
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
return t4_sge_intr_msix;
- if (adap->flags & USING_MSI)
+ if (adap->flags & CXGB4_USING_MSI)
return t4_intr_msi;
return t4_intr_intx;
}
@@ -3278,7 +3441,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
* global Master PF activities like checking for chip ingress stalls,
* etc.
*/
- if (!(adap->flags & MASTER_PF))
+ if (!(adap->flags & CXGB4_MASTER_PF))
goto done;
t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
@@ -3289,10 +3452,10 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- unsigned long m;
- unsigned int i, budget;
struct adapter *adap = from_timer(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
+ unsigned long m, period;
+ unsigned int i, budget;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
@@ -3320,29 +3483,29 @@ static void sge_tx_timer_cb(struct timer_list *t)
budget = MAX_TIMER_TX_RECLAIM;
i = s->ethtxq_rover;
do {
- struct sge_eth_txq *q = &s->ethtxq[i];
-
- if (q->q.in_use &&
- time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
- __netif_tx_trylock(q->txq)) {
- int avail = reclaimable(&q->q);
-
- if (avail) {
- if (avail > budget)
- avail = budget;
-
- free_tx_desc(adap, &q->q, avail, true);
- q->q.in_use -= avail;
- budget -= avail;
- }
- __netif_tx_unlock(q->txq);
- }
+ budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
+ budget);
+ if (!budget)
+ break;
if (++i >= s->ethqsets)
i = 0;
- } while (budget && i != s->ethtxq_rover);
+ } while (i != s->ethtxq_rover);
s->ethtxq_rover = i;
- mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+
+ if (budget == 0) {
+ /* If we found too many reclaimable packets schedule a timer
+ * in the near future to continue where we left off.
+ */
+ period = 2;
+ } else {
+ /* We reclaimed all reclaimable TX Descriptors, so reschedule
+ * at the normal period.
+ */
+ period = TX_QCHECK_PERIOD;
+ }
+
+ mod_timer(&s->tx_timer, jiffies + period);
}
/**
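
With this rework, sge_tx_timer_cb() simply hands each Ethernet TX queue to t4_sge_eth_txq_egress_update() with whatever budget remains, remembers where it stopped in ethtxq_rover, and re-arms the timer sooner when the budget was exhausted. A minimal user-space sketch of that budget-and-rover sweep; queue counts, budget value and names are invented for illustration.

#include <stdio.h>

#define NQUEUES         4
#define SWEEP_BUDGET    16

static int pending[NQUEUES] = { 3, 20, 0, 5 }; /* work waiting per queue */
static int rover;                              /* where the last sweep stopped */

/* Do at most 'budget' units of work on one queue, return how much was done. */
static int service_queue(int i, int budget)
{
        int done = pending[i] < budget ? pending[i] : budget;

        pending[i] -= done;
        return done;
}

static void timer_sweep(void)
{
        int budget = SWEEP_BUDGET;
        int i = rover;

        do {
                budget -= service_queue(i, budget);
                if (!budget)
                        break;
                if (++i >= NQUEUES)
                        i = 0;
        } while (i != rover);
        rover = i;

        /* Out of budget with work left: come back soon; otherwise use the
         * normal period.
         */
        printf("next sweep in %s\n", budget == 0 ? "2 ticks" : "normal period");
}

int main(void)
{
        timer_sweep();          /* services queue 0 fully, queue 1 partially */
        timer_sweep();
        return 0;
}
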
@@ -3386,7 +3549,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
- int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
+ int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
/* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16);
@@ -3421,7 +3584,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
: FW_IQ_IQTYPE_OFLD));
if (fl) {
- enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int chip_ver =
+ CHELSIO_CHIP_VERSION(adap->params.chip);
/* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
@@ -3459,10 +3623,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* the smaller 64-byte value there).
*/
c.fl0dcaen_to_fl0cidxfthresh =
- htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+ htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
FETCHBURSTMIN_128B_X :
- FETCHBURSTMIN_64B_X) |
- FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMIN_64B_T6_X) |
+ FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
c.fl0size = htons(flsz);
@@ -3584,14 +3748,24 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
+/**
+ * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
+ * @adap: the adapter
+ * @txq: the SGE Ethernet TX Queue to initialize
+ * @dev: the Linux Network Device
+ * @netdevq: the corresponding Linux TX Queue
+ * @iqid: the Ingress Queue to which to deliver CIDX Update messages
+ * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
+ */
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
- unsigned int iqid)
+ unsigned int iqid, u8 dbqt)
{
- int ret, nentries;
- struct fw_eq_eth_cmd c;
- struct sge *s = &adap->sge;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
struct port_info *pi = netdev_priv(dev);
+ struct sge *s = &adap->sge;
+ struct fw_eq_eth_cmd c;
+ int ret, nentries;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -3610,19 +3784,47 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
FW_EQ_ETH_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
- c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
- FW_EQ_ETH_CMD_VIID_V(pi->viid));
+
+ /* For TX Ethernet Queues using the SGE Doorbell Queue Timer
+ * mechanism, we use Ingress Queue messages for Hardware Consumer
+ * Index Updates on the TX Queue. Otherwise we have the Hardware
+ * write the CIDX Updates into the Status Page at the end of the
+ * TX Queue.
+ */
+ c.autoequiqe_to_viid = htonl((dbqt
+ ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
+ : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+ FW_EQ_ETH_CMD_VIID_V(pi->viid));
+
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
+ ? HOSTFCMODE_INGRESS_QUEUE_X
+ : HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+
+ /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
c.dcaen_to_eqsize =
- htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_64B_X
+ : FETCHBURSTMIN_64B_T6_X) |
FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+ /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
+	 * currently configured Timer Index. This can be changed later via an
+ * ethtool -C tx-usecs {Timer Val} command. Note that the SGE
+ * Doorbell Queue mode is currently automatically enabled in the
+ * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
+ */
+ if (dbqt)
+ c.timeren_timerix =
+ cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
+ FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
+
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
@@ -3639,6 +3841,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->txq = netdevq;
txq->tso = txq->tx_cso = txq->vlan_ins = 0;
txq->mapping_err = 0;
+ txq->dbqt = dbqt;
+
return 0;
}
@@ -3646,10 +3850,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid)
{
- int ret, nentries;
- struct fw_eq_ctrl_cmd c;
- struct sge *s = &adap->sge;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
struct port_info *pi = netdev_priv(dev);
+ struct sge *s = &adap->sge;
+ struct fw_eq_ctrl_cmd c;
+ int ret, nentries;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -3673,7 +3878,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_64B_X
+ : FETCHBURSTMIN_64B_T6_X) |
FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
@@ -3713,6 +3920,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int uld_type)
{
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
int ret, nentries;
struct fw_eq_ofld_cmd c;
struct sge *s = &adap->sge;
@@ -3743,7 +3951,9 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
+ ? FETCHBURSTMIN_64B_X
+ : FETCHBURSTMIN_64B_T6_X) |
FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index 7b2207a2a130..eaf1fb74689c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -47,8 +47,7 @@ struct smt_data *t4_init_smt(void)
smt_size = SMT_SIZE;
- s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry),
- GFP_KERNEL);
+ s = kvzalloc(struct_size(s, smtab, smt_size), GFP_KERNEL);
if (!s)
return NULL;
s->smt_size = smt_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 82b70a565e24..9a54302bb046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -77,7 +77,7 @@ int cxgb4_get_srq_entry(struct net_device *dev,
adap = netdev2adap(dev);
s = adap->srq;
- if (!(adap->flags & FULL_INIT_DONE) || !s)
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
goto out;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b03f6187a24..a3544041ad32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -198,7 +198,7 @@ static void t4_report_fw_error(struct adapter *adap)
if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
}
}
@@ -4105,6 +4105,9 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
* @mbox: the Firmware Mailbox to use
* @port: the Port ID
* @lc: the Port's Link Configuration
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
*
* Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate first decide what to advertise, then
@@ -4124,6 +4127,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
int ret;
fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
+
/* Convert driver coding of Pause Frame Flow Control settings into the
* Firmware's API.
*/
@@ -4143,8 +4147,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
fw_fec = cc_to_fwcap_fec(cc_fec);
/* Figure out what our Requested Port Capabilities are going to be.
+ * Note parallel structure in t4_handle_get_port_info() and
+ * init_link_config().
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ if (lc->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
rcap = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
@@ -4156,7 +4165,11 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
- /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
+ /* Some Requested Port Capabilities are trivially wrong if they exceed
+ * the Physical Port Capabilities. We can check that here and provide
+ * moderately useful feedback in the system log.
+ *
+ * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
* we need to exclude this from this check in order to maintain
* compatibility ...
*/
@@ -4185,6 +4198,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
sleep_ok, timeout);
+
+ /* Unfortunately, even if the Requested Port Capabilities "fit" within
+ * the Physical Port Capabilities, some combinations of features may
+	 * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
+ * Error Correction. So if the Firmware rejects the L1 Configure
+ * request, flag that here.
+ */
if (ret) {
dev_err(adapter->pdev_dev,
"Requested Port Capabilities %#x rejected, error %d\n",
@@ -4942,7 +4962,13 @@ static void pl_intr_handler(struct adapter *adap)
*/
int t4_slow_intr_handler(struct adapter *adapter)
{
- u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
+ /* There are rare cases where a PL_INT_CAUSE bit may end up getting
+ * set when the corresponding PL_INT_ENABLE bit isn't set. It's
+ * easiest just to mask that case here.
+ */
+ u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
+ u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
+ u32 cause = raw_cause & enable;
if (!(cause & GLBL_INTR_MASK))
return 0;
@@ -4994,7 +5020,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
ulptx_intr_handler(adapter);
/* Clear the interrupts just processed for which we are the master. */
- t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
+ t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
return 1;
}
@@ -5217,7 +5243,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap)
{
- return (adap->flags & FW_OK) && !adap->use_bd;
+ return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
}
/**
@@ -6106,7 +6132,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
* ( MPSBGMAP[Port 1] << 8 ) |
* ( MPSBGMAP[Port 0] << 0 ))
*/
- if (adapter->flags & FW_OK) {
+ if (adapter->flags & CXGB4_FW_OK) {
u32 param, val;
int ret;
@@ -6693,6 +6719,47 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
}
/**
+ * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
+ * @adap: the adapter
+ * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
+ * @dbqtimers: SGE Doorbell Queue Timer table
+ *
+ * Reads the SGE Doorbell Queue Timer values into the provided table.
+ * Returns 0 on success (Firmware and Hardware support this feature),
+ * an error on failure.
+ */
+int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
+ u16 *dbqtimers)
+{
+ int ret, dbqtimerix;
+
+ ret = 0;
+ dbqtimerix = 0;
+ while (dbqtimerix < ndbqtimers) {
+ int nparams, param;
+ u32 params[7], vals[7];
+
+ nparams = ndbqtimers - dbqtimerix;
+ if (nparams > ARRAY_SIZE(params))
+ nparams = ARRAY_SIZE(params);
+
+ for (param = 0; param < nparams; param++)
+ params[param] =
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
+ FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ nparams, params, vals);
+ if (ret)
+ break;
+
+ for (param = 0; param < nparams; param++)
+ dbqtimers[dbqtimerix++] = vals[param];
+ }
+ return ret;
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
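
t4_read_sge_dbqtimers() above fetches the Doorbell Queue Timer values with FW_PARAMS queries, at most seven per mailbox command (the size of its params[]/vals[] scratch arrays). A hedged user-space sketch of that chunked-query loop follows, with demo_query() standing in for the firmware mailbox call and simply fabricating values.

#include <stdio.h>

#define CHUNK 7

/* Pretend "firmware" query: fill vals[0..n-1] for indices base..base+n-1. */
static int demo_query(unsigned int base, unsigned int n, unsigned int *vals)
{
        for (unsigned int i = 0; i < n; i++)
                vals[i] = 100 + base + i;       /* fabricated timer values */
        return 0;
}

static int demo_read_timers(unsigned int ntimers, unsigned short *timers)
{
        unsigned int ix = 0;
        int ret = 0;

        while (ix < ntimers) {
                unsigned int vals[CHUNK];
                unsigned int n = ntimers - ix;

                if (n > CHUNK)
                        n = CHUNK;

                ret = demo_query(ix, n, vals);
                if (ret)
                        break;

                for (unsigned int i = 0; i < n; i++)
                        timers[ix++] = (unsigned short)vals[i];
        }
        return ret;
}

int main(void)
{
        unsigned short timers[8];       /* SGE_NDBQTIMERS is 8 in t4_hw.h */

        if (!demo_read_timers(8, timers))
                for (unsigned int i = 0; i < 8; i++)
                        printf("timer[%u] = %u\n", i, (unsigned int)timers[i]);
        return 0;
}
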
@@ -7026,10 +7093,10 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
- /* Disable FW_OK flag so that mbox commands with FW_OK flag set
- * wont be sent when we are flashing FW.
+ /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
+	 * set won't be sent when we are flashing FW.
*/
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
@@ -7068,7 +7135,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
*/
(void)t4_init_devlog_params(adap);
out:
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
return ret;
}
@@ -8461,6 +8528,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
fc = fwcap_to_cc_pause(linkattr);
speed = fwcap_to_speed(linkattr);
+ /* Reset state for communicating new Transceiver Module status and
+ * whether the OS-dependent layer wants us to redo the current
+ * "sticky" L1 Configure Link Parameters.
+ */
lc->new_module = false;
lc->redo_l1cfg = false;
@@ -8497,9 +8568,15 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
*/
pi->port_type = port_type;
+ /* Record new Module Type information.
+ */
pi->mod_type = mod_type;
+ /* Let the OS-dependent layer know if we have a new
+ * Transceiver Module inserted.
+ */
lc->new_module = t4_is_inserted_mod_type(mod_type);
+
t4_os_portmod_changed(adapter, pi->port_id);
}
@@ -8507,8 +8584,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
fc != lc->fc || fec != lc->fec) { /* something changed */
if (!link_ok && lc->link_ok) {
lc->link_down_rc = linkdnrc;
- dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
- pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ dev_warn_ratelimited(adapter->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->tx_chan,
+ t4_link_down_rc_str(linkdnrc));
}
lc->link_ok = link_ok;
lc->speed = speed;
@@ -8518,6 +8597,11 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
lc->lpacaps = lpacaps;
lc->acaps = acaps & ADVERT_MASK;
+ /* If we're not physically capable of Auto-Negotiation, note
+ * this as Auto-Negotiation disabled. Otherwise, we track
+ * what Auto-Negotiation settings we have. Note parallel
+ * structure in t4_link_l1cfg_core() and init_link_config().
+ */
if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
lc->autoneg = AUTONEG_DISABLE;
} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
@@ -8535,6 +8619,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
t4_os_link_changed(adapter, pi->port_id, link_ok);
}
+ /* If we have a new Transceiver Module and the OS-dependent code has
+ * told us that it wants us to redo whatever "sticky" L1 Configuration
+ * Link Parameters are set, do that now.
+ */
if (lc->new_module && lc->redo_l1cfg) {
struct link_config old_lc;
int ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 361d5032c288..002fc62ea726 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -91,6 +91,7 @@ enum {
SGE_CTXT_SIZE = 24, /* size of SGE context */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+ SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */
SGE_MAX_IQ_SIZE = 65520,
SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index bf7325f6d553..0c5373462ced 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -218,6 +218,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index f6558cbfc54e..eb1aa82149db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -71,12 +71,18 @@
#define FETCHBURSTMIN_64B_X 2
#define FETCHBURSTMIN_128B_X 3
+/* T6 and later use a single-bit encoding for FetchBurstMin */
+#define FETCHBURSTMIN_64B_T6_X 0
+#define FETCHBURSTMIN_128B_T6_X 1
+
#define FETCHBURSTMAX_256B_X 2
#define FETCHBURSTMAX_512B_X 3
+#define HOSTFCMODE_INGRESS_QUEUE_X 1
#define HOSTFCMODE_STATUS_PAGE_X 2
#define CIDXFLUSHTHRESH_32_X 5
+#define CIDXFLUSHTHRESH_128_X 7
#define UPDATEDELIVERY_INTERRUPT_X 1
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 1d9b3e1e5f94..b2a618e72fcf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1254,6 +1254,8 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
+ FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29,
+ FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
};
/*
@@ -1310,6 +1312,14 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
+ FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
+};
+
+/* Virtual link state as seen by the specified VF */
+enum vf_link_states {
+ FW_VF_LINK_STATE_AUTO = 0x00,
+ FW_VF_LINK_STATE_ENABLE = 0x01,
+ FW_VF_LINK_STATE_DISABLE = 0x02,
};
/*
@@ -1322,6 +1332,7 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+ FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX = 0x15,
FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
};
@@ -1751,8 +1762,8 @@ struct fw_eq_eth_cmd {
__be32 fetchszm_to_iqid;
__be32 dcaen_to_eqsize;
__be64 eqaddr;
- __be32 viid_pkd;
- __be32 r8_lo;
+ __be32 autoequiqe_to_viid;
+ __be32 timeren_timerix;
__be64 r9;
};
@@ -1847,6 +1858,10 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_EQSIZE_S 0
#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S)
+#define FW_EQ_ETH_CMD_AUTOEQUIQE_S 31
+#define FW_EQ_ETH_CMD_AUTOEQUIQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUIQE_S)
+#define FW_EQ_ETH_CMD_AUTOEQUIQE_F FW_EQ_ETH_CMD_AUTOEQUIQE_V(1U)
+
#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30
#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S)
#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U)
@@ -1854,6 +1869,19 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_VIID_S 16
#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S)
+#define FW_EQ_ETH_CMD_TIMEREN_S 3
+#define FW_EQ_ETH_CMD_TIMEREN_M 0x1
+#define FW_EQ_ETH_CMD_TIMEREN_V(x) ((x) << FW_EQ_ETH_CMD_TIMEREN_S)
+#define FW_EQ_ETH_CMD_TIMEREN_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_TIMEREN_S) & FW_EQ_ETH_CMD_TIMEREN_M)
+#define FW_EQ_ETH_CMD_TIMEREN_F FW_EQ_ETH_CMD_TIMEREN_V(1U)
+
+#define FW_EQ_ETH_CMD_TIMERIX_S 0
+#define FW_EQ_ETH_CMD_TIMERIX_M 0x7
+#define FW_EQ_ETH_CMD_TIMERIX_V(x) ((x) << FW_EQ_ETH_CMD_TIMERIX_S)
+#define FW_EQ_ETH_CMD_TIMERIX_G(x) \
+ (((x) >> FW_EQ_ETH_CMD_TIMERIX_S) & FW_EQ_ETH_CMD_TIMERIX_M)
+
struct fw_eq_ctrl_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
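
The new TIMEREN/TIMERIX definitions follow the driver's usual _S (shift), _M (mask), _V (insert) and _G (extract) macro pattern for fields packed into the command words. A short user-space sketch of composing and decoding a timeren_timerix value with those macros; the macro bodies mirror the ones added to t4fw_api.h, while the demo_ wrapper and main() are illustrative only.

#include <stdio.h>

#define FW_EQ_ETH_CMD_TIMEREN_S         3
#define FW_EQ_ETH_CMD_TIMEREN_M         0x1
#define FW_EQ_ETH_CMD_TIMEREN_V(x)      ((x) << FW_EQ_ETH_CMD_TIMEREN_S)
#define FW_EQ_ETH_CMD_TIMEREN_G(x) \
        (((x) >> FW_EQ_ETH_CMD_TIMEREN_S) & FW_EQ_ETH_CMD_TIMEREN_M)
#define FW_EQ_ETH_CMD_TIMEREN_F         FW_EQ_ETH_CMD_TIMEREN_V(1U)

#define FW_EQ_ETH_CMD_TIMERIX_S         0
#define FW_EQ_ETH_CMD_TIMERIX_M         0x7
#define FW_EQ_ETH_CMD_TIMERIX_V(x)      ((x) << FW_EQ_ETH_CMD_TIMERIX_S)
#define FW_EQ_ETH_CMD_TIMERIX_G(x) \
        (((x) >> FW_EQ_ETH_CMD_TIMERIX_S) & FW_EQ_ETH_CMD_TIMERIX_M)

/* Compose the host-order value that t4_sge_alloc_eth_txq() would convert
 * with cpu_to_be32() before writing it into the command.
 */
static unsigned int demo_timeren_timerix(unsigned int timerix)
{
        return FW_EQ_ETH_CMD_TIMEREN_F | FW_EQ_ETH_CMD_TIMERIX_V(timerix);
}

int main(void)
{
        unsigned int w = demo_timeren_timerix(5);

        printf("word = %#x, timer enabled = %u, timer index = %u\n",
               w, FW_EQ_ETH_CMD_TIMEREN_G(w), FW_EQ_ETH_CMD_TIMERIX_G(w));
        return 0;
}
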
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index a844296135b4..9125ddd89dd1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x14
-#define T4FW_VERSION_MICRO 0x08
+#define T4FW_VERSION_MINOR 0x16
+#define T4FW_VERSION_MICRO 0x09
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x14
-#define T5FW_VERSION_MICRO 0x08
+#define T5FW_VERSION_MINOR 0x16
+#define T5FW_VERSION_MICRO 0x09
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x14
-#define T6FW_VERSION_MICRO 0x08
+#define T6FW_VERSION_MINOR 0x16
+#define T6FW_VERSION_MICRO 0x09
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00