Diffstat (limited to 'drivers/net/ethernet/emulex/benet/be_main.c'):
 -rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 1039
 1 file changed, 517 insertions(+), 522 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e703d64434f8..528a886bc2cd 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -127,9 +127,11 @@ static inline bool be_is_mc(struct be_adapter *adapter) {
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
- if (mem->va)
+ if (mem->va) {
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
+ mem->va = NULL;
+ }
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -144,7 +146,7 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
GFP_KERNEL);
if (!mem->va)
- return -1;
+ return -ENOMEM;
memset(mem->va, 0, mem->size);
return 0;
}
@@ -233,7 +235,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int status = 0;
u8 current_mac[ETH_ALEN];
- u32 pmac_id = adapter->pmac_id;
+ u32 pmac_id = adapter->pmac_id[0];
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -246,7 +248,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id, 0);
+ adapter->if_handle, &adapter->pmac_id[0], 0);
if (status)
goto err;
@@ -286,7 +288,9 @@ static void populate_be2_stats(struct be_adapter *adapter)
drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
drvs->rx_dropped_header_too_small =
port_stats->rx_dropped_header_too_small;
- drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_address_mismatch_drops =
+ port_stats->rx_address_mismatch_drops +
+ port_stats->rx_vlan_mismatch_drops;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
@@ -298,9 +302,7 @@ static void populate_be2_stats(struct be_adapter *adapter)
else
drvs->jabber_events = rxf_stats->port0_jabber_events;
drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
- drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
- drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
drvs->forwarded_packets = rxf_stats->forwarded_packets;
drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
@@ -337,7 +339,7 @@ static void populate_be3_stats(struct be_adapter *adapter)
port_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop =
port_stats->rx_input_fifo_overflow_drop;
- drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@ -345,9 +347,7 @@ static void populate_be3_stats(struct be_adapter *adapter)
drvs->tx_controlframes = port_stats->tx_controlframes;
drvs->jabber_events = port_stats->jabber_events;
drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
- drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
- drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
drvs->forwarded_packets = rxf_stats->forwarded_packets;
drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
@@ -380,13 +380,14 @@ static void populate_lancer_stats(struct be_adapter *adapter)
drvs->rx_dropped_header_too_small =
pport_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
- drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
+ drvs->rx_address_mismatch_drops =
+ pport_stats->rx_address_mismatch_drops +
+ pport_stats->rx_vlan_mismatch_drops;
drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
drvs->jabber_events = pport_stats->rx_jabbers;
- drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
drvs->forwarded_packets = pport_stats->num_forwards_lo;
drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
drvs->rx_drops_too_many_frags =
@@ -884,6 +885,29 @@ static void be_set_rx_mode(struct net_device *netdev)
goto done;
}
+ if (netdev_uc_count(netdev) != adapter->uc_macs) {
+ struct netdev_hw_addr *ha;
+ int i = 1; /* First slot is claimed by the Primary MAC */
+
+ for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+ }
+
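+ /* More unicast addresses than HW filter slots: fall back to promiscuous */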
+ if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
+ adapter->promiscuous = true;
+ goto done;
+ }
+
+ netdev_for_each_uc_addr(ha, adapter->netdev) {
+ adapter->uc_macs++; /* First slot is for Primary MAC */
+ be_cmd_pmac_add(adapter, (u8 *)ha->addr,
+ adapter->if_handle,
+ &adapter->pmac_id[adapter->uc_macs], 0);
+ }
+ }
+
be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
return;
@@ -954,14 +978,21 @@ static int be_set_vf_vlan(struct net_device *netdev,
return -EINVAL;
if (vlan) {
- adapter->vf_cfg[vf].vlan_tag = vlan;
- adapter->vlans_added++;
+ if (adapter->vf_cfg[vf].vlan_tag != vlan) {
+ /* If this is a new value, program it. Else skip. */
+ adapter->vf_cfg[vf].vlan_tag = vlan;
+
+ status = be_cmd_set_hsw_config(adapter, vlan,
+ vf + 1, adapter->vf_cfg[vf].if_handle);
+ }
} else {
+ /* Reset Transparent Vlan Tagging. */
adapter->vf_cfg[vf].vlan_tag = 0;
- adapter->vlans_added--;
+ vlan = adapter->vf_cfg[vf].def_vid;
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
+ adapter->vf_cfg[vf].if_handle);
}
- status = be_vid_config(adapter, true, vf);
if (status)
dev_info(&adapter->pdev->dev,
@@ -997,18 +1028,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status;
}
-static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
+static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
- struct be_eq_obj *rx_eq = &rxo->rx_eq;
- struct be_rx_stats *stats = rx_stats(rxo);
+ struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
ulong now = jiffies;
ulong delta = now - stats->rx_jiffies;
u64 pkts;
unsigned int start, eqd;
- if (!rx_eq->enable_aic)
+ if (!eqo->enable_aic) {
+ eqd = eqo->eqd;
+ goto modify_eqd;
+ }
+
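+ /* EQs beyond the RX queue count have no RX stats to drive adaptive coalescing */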
+ if (eqo->idx >= adapter->num_rx_qs)
return;
+ stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+
/* Wrapped around */
if (time_before(now, stats->rx_jiffies)) {
stats->rx_jiffies = now;
@@ -1027,17 +1064,16 @@ static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
stats->rx_pkts_prev = pkts;
stats->rx_jiffies = now;
- eqd = stats->rx_pps / 110000;
- eqd = eqd << 3;
- if (eqd > rx_eq->max_eqd)
- eqd = rx_eq->max_eqd;
- if (eqd < rx_eq->min_eqd)
- eqd = rx_eq->min_eqd;
+ eqd = (stats->rx_pps / 110000) << 3;
+ eqd = min(eqd, eqo->max_eqd);
+ eqd = max(eqd, eqo->min_eqd);
if (eqd < 10)
eqd = 0;
- if (eqd != rx_eq->cur_eqd) {
- be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
- rx_eq->cur_eqd = eqd;
+
+modify_eqd:
+ if (eqd != eqo->cur_eqd) {
+ be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
+ eqo->cur_eqd = eqd;
}
}
@@ -1065,11 +1101,10 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp)
(rxcp->ip_csum || rxcp->ipv6);
}
-static struct be_rx_page_info *
-get_rx_page_info(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u16 frag_idx)
+static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
+ u16 frag_idx)
{
+ struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *rx_page_info;
struct be_queue_info *rxq = &rxo->q;
@@ -1088,16 +1123,15 @@ get_rx_page_info(struct be_adapter *adapter,
}
/* Throwaway the data in the Rx completion */
-static void be_rx_compl_discard(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+static void be_rx_compl_discard(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
{
struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
u16 i, num_rcvd = rxcp->num_rcvd;
for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
index_inc(&rxcp->rxq_idx, rxq->len);
@@ -1108,8 +1142,8 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
* skb_fill_rx_data forms a complete skb for an ether frame
* indicated by rxcp.
*/
-static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
- struct sk_buff *skb, struct be_rx_compl_info *rxcp)
+static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
+ struct be_rx_compl_info *rxcp)
{
struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
@@ -1117,7 +1151,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
u16 hdr_len, curr_frag_len, remaining;
u8 *start;
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
start = page_address(page_info->page) + page_info->page_offset;
prefetch(start);
@@ -1154,7 +1188,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
index_inc(&rxcp->rxq_idx, rxq->len);
remaining = rxcp->pkt_size - curr_frag_len;
for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
/* Coalesce all frags from the same physical page in one slot */
@@ -1182,21 +1216,21 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
-static void be_rx_compl_process(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+static void be_rx_compl_process(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
{
+ struct be_adapter *adapter = rxo->adapter;
struct net_device *netdev = adapter->netdev;
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
+ skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
if (unlikely(!skb)) {
rx_stats(rxo)->rx_drops_no_skbs++;
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
return;
}
- skb_fill_rx_data(adapter, rxo, skb, rxcp);
+ skb_fill_rx_data(rxo, skb, rxcp);
if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1204,7 +1238,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- if (adapter->netdev->features & NETIF_F_RXHASH)
+ if (netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
@@ -1215,26 +1249,25 @@ static void be_rx_compl_process(struct be_adapter *adapter,
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
-static void be_rx_compl_process_gro(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
+ struct be_rx_compl_info *rxcp)
{
+ struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL;
struct be_queue_info *rxq = &rxo->q;
- struct be_eq_obj *eq_obj = &rxo->rx_eq;
u16 remaining, curr_frag_len;
u16 i, j;
- skb = napi_get_frags(&eq_obj->napi);
+ skb = napi_get_frags(napi);
if (!skb) {
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
return;
}
remaining = rxcp->pkt_size;
for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
@@ -1267,12 +1300,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
- napi_gro_frags(&eq_obj->napi);
+ napi_gro_frags(napi);
}
-static void be_parse_rx_compl_v1(struct be_adapter *adapter,
- struct be_eth_rx_compl *compl,
- struct be_rx_compl_info *rxcp)
+static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
{
rxcp->pkt_size =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
@@ -1303,9 +1335,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
-static void be_parse_rx_compl_v0(struct be_adapter *adapter,
- struct be_eth_rx_compl *compl,
- struct be_rx_compl_info *rxcp)
+static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
{
rxcp->pkt_size =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
@@ -1351,9 +1382,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
be_dws_le_to_cpu(compl, sizeof(*compl));
if (adapter->be3_native)
- be_parse_rx_compl_v1(adapter, compl, rxcp);
+ be_parse_rx_compl_v1(compl, rxcp);
else
- be_parse_rx_compl_v0(adapter, compl, rxcp);
+ be_parse_rx_compl_v0(compl, rxcp);
if (rxcp->vlanf) {
/* vlanf could be wrongly set in some cards.
@@ -1392,7 +1423,6 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
struct be_adapter *adapter = rxo->adapter;
- struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &rxo->q;
struct page *pagep = NULL;
@@ -1434,7 +1464,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
prev_page_info = page_info;
queue_head_inc(rxq);
- page_info = &page_info_tbl[rxq->head];
+ page_info = &rxo->page_info_tbl[rxq->head];
}
if (pagep)
prev_page_info->last_page_user = true;
@@ -1496,62 +1526,51 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
return num_wrbs;
}
-static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
+/* Return the number of events in the event queue */
+static inline int events_get(struct be_eq_obj *eqo)
{
- struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+ struct be_eq_entry *eqe;
+ int num = 0;
- if (!eqe->evt)
- return NULL;
+ do {
+ eqe = queue_tail_node(&eqo->q);
+ if (eqe->evt == 0)
+ break;
- rmb();
- eqe->evt = le32_to_cpu(eqe->evt);
- queue_tail_inc(&eq_obj->q);
- return eqe;
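+ /* read barrier: don't consume the EQE before the valid-bit read completes */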
+ rmb();
+ eqe->evt = 0;
+ num++;
+ queue_tail_inc(&eqo->q);
+ } while (true);
+
+ return num;
}
-static int event_handle(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj,
- bool rearm)
+static int event_handle(struct be_eq_obj *eqo)
{
- struct be_eq_entry *eqe;
- u16 num = 0;
+ bool rearm = false;
+ int num = events_get(eqo);
- while ((eqe = event_get(eq_obj)) != NULL) {
- eqe->evt = 0;
- num++;
- }
-
- /* Deal with any spurious interrupts that come
- * without events
- */
+ /* Deal with any spurious interrupts that come without events */
if (!num)
rearm = true;
- be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
+ be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
if (num)
- napi_schedule(&eq_obj->napi);
+ napi_schedule(&eqo->napi);
return num;
}
-/* Just read and notify events without processing them.
- * Used at the time of destroying event queues */
-static void be_eq_clean(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj)
+/* Leaves the EQ in a disarmed state */
+static void be_eq_clean(struct be_eq_obj *eqo)
{
- struct be_eq_entry *eqe;
- u16 num = 0;
+ int num = events_get(eqo);
- while ((eqe = event_get(eq_obj)) != NULL) {
- eqe->evt = 0;
- num++;
- }
-
- if (num)
- be_eq_notify(adapter, eq_obj->q.id, false, true, num);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}
-static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
@@ -1561,14 +1580,14 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
/* First cleanup pending rx completions */
while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
- be_rx_compl_discard(adapter, rxo, rxcp);
- be_cq_notify(adapter, rx_cq->id, false, 1);
+ be_rx_compl_discard(rxo, rxcp);
+ be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
}
/* Then free posted rx buffer that were not used */
tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
- page_info = get_rx_page_info(adapter, rxo, tail);
+ page_info = get_rx_page_info(rxo, tail);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
}
@@ -1576,52 +1595,104 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
rxq->tail = rxq->head = 0;
}
-static void be_tx_compl_clean(struct be_adapter *adapter,
- struct be_tx_obj *txo)
+static void be_tx_compl_clean(struct be_adapter *adapter)
{
- struct be_queue_info *tx_cq = &txo->cq;
- struct be_queue_info *txq = &txo->q;
+ struct be_tx_obj *txo;
+ struct be_queue_info *txq;
struct be_eth_tx_compl *txcp;
u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
- struct sk_buff **sent_skbs = txo->sent_skb_list;
struct sk_buff *sent_skb;
bool dummy_wrb;
+ int i, pending_txqs;
/* Wait for a max of 200ms for all the tx-completions to arrive. */
do {
- while ((txcp = be_tx_compl_get(tx_cq))) {
- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
- num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
- cmpl++;
- }
- if (cmpl) {
- be_cq_notify(adapter, tx_cq->id, false, cmpl);
- atomic_sub(num_wrbs, &txq->used);
- cmpl = 0;
- num_wrbs = 0;
+ pending_txqs = adapter->num_tx_qs;
+
+ for_all_tx_queues(adapter, txo, i) {
+ txq = &txo->q;
+ while ((txcp = be_tx_compl_get(&txo->cq))) {
+ end_idx =
+ AMAP_GET_BITS(struct amap_eth_tx_compl,
+ wrb_index, txcp);
+ num_wrbs += be_tx_compl_process(adapter, txo,
+ end_idx);
+ cmpl++;
+ }
+ if (cmpl) {
+ be_cq_notify(adapter, txo->cq.id, false, cmpl);
+ atomic_sub(num_wrbs, &txq->used);
+ cmpl = 0;
+ num_wrbs = 0;
+ }
+ if (atomic_read(&txq->used) == 0)
+ pending_txqs--;
}
- if (atomic_read(&txq->used) == 0 || ++timeo > 200)
+ if (pending_txqs == 0 || ++timeo > 200)
break;
mdelay(1);
} while (true);
- if (atomic_read(&txq->used))
- dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
- atomic_read(&txq->used));
+ for_all_tx_queues(adapter, txo, i) {
+ txq = &txo->q;
+ if (atomic_read(&txq->used))
+ dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
+ atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = txo->sent_skb_list[txq->tail];
+ end_idx = txq->tail;
+ num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
+ &dummy_wrb);
+ index_adv(&end_idx, num_wrbs - 1, txq->len);
+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
+ atomic_sub(num_wrbs, &txq->used);
+ }
+ }
+}
+
+static void be_evt_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_eq_obj *eqo;
+ int i;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ be_eq_clean(eqo);
+ if (eqo->q.created)
+ be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ be_queue_free(adapter, &eqo->q);
+ }
+}
+
+static int be_evt_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq;
+ struct be_eq_obj *eqo;
+ int i, rc;
+
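+ /* one EQ (and hence one NAPI context) per irq vector */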
+ adapter->num_evt_qs = num_irqs(adapter);
+
+ for_all_evt_queues(adapter, eqo, i) {
+ eqo->adapter = adapter;
+ eqo->tx_budget = BE_TX_BUDGET;
+ eqo->idx = i;
+ eqo->max_eqd = BE_MAX_EQD;
+ eqo->enable_aic = true;
+
+ eq = &eqo->q;
+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry));
+ if (rc)
+ return rc;
- /* free posted tx for which compls will never arrive */
- while (atomic_read(&txq->used)) {
- sent_skb = sent_skbs[txq->tail];
- end_idx = txq->tail;
- index_adv(&end_idx,
- wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
- txq->len);
- num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
- atomic_sub(num_wrbs, &txq->used);
+ rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
+ if (rc)
+ return rc;
}
+ return 0;
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
@@ -1644,22 +1715,19 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *q, *cq;
- /* Alloc MCC compl queue */
cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
sizeof(struct be_mcc_compl)))
goto err;
- /* Ask BE to create MCC compl queue; share TX's eq */
- if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
+ /* Use the default EQ for MCC completions */
+ if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
goto mcc_cq_free;
- /* Alloc MCC queue */
q = &adapter->mcc_obj.q;
if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
goto mcc_cq_destroy;
- /* Ask BE to create MCC queue */
if (be_cmd_mccq_create(adapter, q, cq))
goto mcc_q_free;
@@ -1692,14 +1760,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q);
}
-
- /* Clear any residual events */
- be_eq_clean(adapter, &adapter->tx_eq);
-
- q = &adapter->tx_eq.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- be_queue_free(adapter, q);
}
static int be_num_txqs_want(struct be_adapter *adapter)
@@ -1712,10 +1772,10 @@ static int be_num_txqs_want(struct be_adapter *adapter)
return MAX_TX_QS;
}
-/* One TX event queue is shared by all TX compl qs */
-static int be_tx_queues_create(struct be_adapter *adapter)
+static int be_tx_cqs_create(struct be_adapter *adapter)
{
- struct be_queue_info *eq, *q, *cq;
+ struct be_queue_info *cq, *eq;
+ int status;
struct be_tx_obj *txo;
u8 i;
@@ -1727,192 +1787,109 @@ static int be_tx_queues_create(struct be_adapter *adapter)
rtnl_unlock();
}
- adapter->tx_eq.max_eqd = 0;
- adapter->tx_eq.min_eqd = 0;
- adapter->tx_eq.cur_eqd = 96;
- adapter->tx_eq.enable_aic = false;
+ for_all_tx_queues(adapter, txo, i) {
+ cq = &txo->cq;
+ status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
+ sizeof(struct be_eth_tx_compl));
+ if (status)
+ return status;
- eq = &adapter->tx_eq.q;
- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry)))
- return -1;
+ /* If num_evt_qs is less than num_tx_qs, then more than
+ * one txq share an eq
+ */
+ eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
+ status = be_cmd_cq_create(adapter, cq, eq, false, 3);
+ if (status)
+ return status;
+ }
+ return 0;
+}
- if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
- goto err;
- adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
+static int be_tx_qs_create(struct be_adapter *adapter)
+{
+ struct be_tx_obj *txo;
+ int i, status;
for_all_tx_queues(adapter, txo, i) {
- cq = &txo->cq;
- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
- sizeof(struct be_eth_tx_compl)))
- goto err;
-
- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
- goto err;
+ status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
+ sizeof(struct be_eth_wrb));
+ if (status)
+ return status;
- q = &txo->q;
- if (be_queue_alloc(adapter, q, TX_Q_LEN,
- sizeof(struct be_eth_wrb)))
- goto err;
+ status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ if (status)
+ return status;
}
- return 0;
-err:
- be_tx_queues_destroy(adapter);
- return -1;
+ return 0;
}
-static void be_rx_queues_destroy(struct be_adapter *adapter)
+static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
struct be_rx_obj *rxo;
int i;
for_all_rx_queues(adapter, rxo, i) {
- be_queue_free(adapter, &rxo->q);
-
q = &rxo->cq;
if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q);
-
- q = &rxo->rx_eq.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- be_queue_free(adapter, q);
}
}
-static u32 be_num_rxqs_want(struct be_adapter *adapter)
+static int be_rx_cqs_create(struct be_adapter *adapter)
{
- if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !sriov_enabled(adapter) && be_physfn(adapter)) {
- return 1 + MAX_RSS_QS; /* one default non-RSS queue */
- } else {
- dev_warn(&adapter->pdev->dev,
- "No support for multiple RX queues\n");
- return 1;
- }
-}
-
-static int be_rx_queues_create(struct be_adapter *adapter)
-{
- struct be_queue_info *eq, *q, *cq;
+ struct be_queue_info *eq, *cq;
struct be_rx_obj *rxo;
int rc, i;
- adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
- msix_enabled(adapter) ?
- adapter->num_msix_vec - 1 : 1);
- if (adapter->num_rx_qs != MAX_RX_QS)
- dev_warn(&adapter->pdev->dev,
- "Can create only %d RX queues", adapter->num_rx_qs);
+ /* We'll create as many RSS rings as there are irqs.
+ * But when there's only one irq there's no use creating RSS rings
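+ * With multiple irqs, one extra default (non-RSS) RXQ is created as well.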
+ */
+ adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
+ num_irqs(adapter) + 1 : 1;
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
rxo->adapter = adapter;
- rxo->rx_eq.max_eqd = BE_MAX_EQD;
- rxo->rx_eq.enable_aic = true;
-
- /* EQ */
- eq = &rxo->rx_eq.q;
- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
- if (rc)
- goto err;
-
- rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
- if (rc)
- goto err;
-
- rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
-
- /* CQ */
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
sizeof(struct be_eth_rx_compl));
if (rc)
- goto err;
-
- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
- if (rc)
- goto err;
+ return rc;
- /* Rx Q - will be created in be_open() */
- q = &rxo->q;
- rc = be_queue_alloc(adapter, q, RX_Q_LEN,
- sizeof(struct be_eth_rx_d));
+ eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
+ rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (rc)
- goto err;
-
+ return rc;
}
- return 0;
-err:
- be_rx_queues_destroy(adapter);
- return -1;
-}
+ if (adapter->num_rx_qs != MAX_RX_QS)
+ dev_info(&adapter->pdev->dev,
+ "Created only %d receive queues", adapter->num_rx_qs);
-static bool event_peek(struct be_eq_obj *eq_obj)
-{
- struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
- if (!eqe->evt)
- return false;
- else
- return true;
+ return 0;
}
static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
- struct be_rx_obj *rxo;
- int isr, i, tx = 0 , rx = 0;
-
- if (lancer_chip(adapter)) {
- if (event_peek(&adapter->tx_eq))
- tx = event_handle(adapter, &adapter->tx_eq, false);
- for_all_rx_queues(adapter, rxo, i) {
- if (event_peek(&rxo->rx_eq))
- rx |= event_handle(adapter, &rxo->rx_eq, true);
- }
-
- if (!(tx || rx))
- return IRQ_NONE;
-
- } else {
- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
- if (!isr)
- return IRQ_NONE;
-
- if ((1 << adapter->tx_eq.eq_idx & isr))
- event_handle(adapter, &adapter->tx_eq, false);
-
- for_all_rx_queues(adapter, rxo, i) {
- if ((1 << rxo->rx_eq.eq_idx & isr))
- event_handle(adapter, &rxo->rx_eq, true);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t be_msix_rx(int irq, void *dev)
-{
- struct be_rx_obj *rxo = dev;
- struct be_adapter *adapter = rxo->adapter;
-
- event_handle(adapter, &rxo->rx_eq, true);
+ int num_evts;
- return IRQ_HANDLED;
+ /* With INTx only one EQ is used */
+ num_evts = event_handle(&adapter->eq_obj[0]);
+ if (num_evts)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
-static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
+static irqreturn_t be_msix(int irq, void *dev)
{
- struct be_adapter *adapter = dev;
-
- event_handle(adapter, &adapter->tx_eq, false);
+ struct be_eq_obj *eqo = dev;
+ event_handle(eqo);
return IRQ_HANDLED;
}
@@ -1921,16 +1898,14 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
return (rxcp->tcpf && !rxcp->err) ? true : false;
}
-static int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
+ int budget)
{
- struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
u32 work_done;
- rx_stats(rxo)->rx_polls++;
for (work_done = 0; work_done < budget; work_done++) {
rxcp = be_rx_compl_get(rxo);
if (!rxcp)
@@ -1942,7 +1917,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
/* Discard compl with partial DMA Lancer B0 */
if (unlikely(!rxcp->pkt_size)) {
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
@@ -1951,94 +1926,96 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
*/
if (unlikely(rxcp->port != adapter->port_num &&
!lancer_chip(adapter))) {
- be_rx_compl_discard(adapter, rxo, rxcp);
+ be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
if (do_gro(rxcp))
- be_rx_compl_process_gro(adapter, rxo, rxcp);
+ be_rx_compl_process_gro(rxo, napi, rxcp);
else
- be_rx_compl_process(adapter, rxo, rxcp);
+ be_rx_compl_process(rxo, rxcp);
loop_continue:
be_rx_stats_update(rxo, rxcp);
}
- be_cq_notify(adapter, rx_cq->id, false, work_done);
+ if (work_done) {
+ be_cq_notify(adapter, rx_cq->id, true, work_done);
- /* Refill the queue */
- if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
- be_post_rx_frags(rxo, GFP_ATOMIC);
-
- /* All consumed */
- if (work_done < budget) {
- napi_complete(napi);
- /* Arm CQ */
- be_cq_notify(adapter, rx_cq->id, true, 0);
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ be_post_rx_frags(rxo, GFP_ATOMIC);
}
+
return work_done;
}
-/* As TX and MCC share the same EQ check for both TX and MCC completions.
- * For TX/MCC we don't honour budget; consume everything
- */
-static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
+ int budget, int idx)
{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp;
- int tx_compl, mcc_compl, status = 0;
- u8 i;
- u16 num_wrbs;
+ int num_wrbs = 0, work_done;
- for_all_tx_queues(adapter, txo, i) {
- tx_compl = 0;
- num_wrbs = 0;
- while ((txcp = be_tx_compl_get(&txo->cq))) {
- num_wrbs += be_tx_compl_process(adapter, txo,
+ for (work_done = 0; work_done < budget; work_done++) {
+ txcp = be_tx_compl_get(&txo->cq);
+ if (!txcp)
+ break;
+ num_wrbs += be_tx_compl_process(adapter, txo,
AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp));
- tx_compl++;
- }
- if (tx_compl) {
- be_cq_notify(adapter, txo->cq.id, true, tx_compl);
-
- atomic_sub(num_wrbs, &txo->q.used);
+ }
- /* As Tx wrbs have been freed up, wake up netdev queue
- * if it was stopped due to lack of tx wrbs. */
- if (__netif_subqueue_stopped(adapter->netdev, i) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
- netif_wake_subqueue(adapter->netdev, i);
- }
+ if (work_done) {
+ be_cq_notify(adapter, txo->cq.id, true, work_done);
+ atomic_sub(num_wrbs, &txo->q.used);
- u64_stats_update_begin(&tx_stats(txo)->sync_compl);
- tx_stats(txo)->tx_compl += tx_compl;
- u64_stats_update_end(&tx_stats(txo)->sync_compl);
+ /* As Tx wrbs have been freed up, wake up netdev queue
+ * if it was stopped due to lack of tx wrbs. */
+ if (__netif_subqueue_stopped(adapter->netdev, idx) &&
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
+ netif_wake_subqueue(adapter->netdev, idx);
}
+
+ u64_stats_update_begin(&tx_stats(txo)->sync_compl);
+ tx_stats(txo)->tx_compl += work_done;
+ u64_stats_update_end(&tx_stats(txo)->sync_compl);
}
+ return (work_done < budget); /* Done */
+}
- mcc_compl = be_process_mcc(adapter, &status);
+int be_poll(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter = eqo->adapter;
+ int max_work = 0, work, i;
+ bool tx_done;
- if (mcc_compl) {
- be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
+ /* Process all TXQs serviced by this EQ */
+ for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
+ tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
+ eqo->tx_budget, i);
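+ /* TX work not done within tx_budget: report full budget so NAPI polls again */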
+ if (!tx_done)
+ max_work = budget;
}
- napi_complete(napi);
+ /* This loop will iterate twice for EQ0 in which
+ * completions of the last RXQ (default one) are also processed.
+ * For other EQs the loop iterates only once
+ */
+ for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
+ work = be_process_rx(&adapter->rx_obj[i], napi, budget);
+ max_work = max(work, max_work);
+ }
- /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
- if (lancer_chip(adapter) && !msix_enabled(adapter)) {
- for_all_tx_queues(adapter, txo, i)
- be_cq_notify(adapter, txo->cq.id, true, 0);
+ if (is_mcc_eqo(eqo))
+ be_process_mcc(adapter);
- be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+ if (max_work < budget) {
+ napi_complete(napi);
+ be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ } else {
+ /* As we'll continue in polling mode, count and clear events */
+ be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
}
-
- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
- adapter->drv_stats.tx_events++;
- return 1;
+ return max_work;
}
void be_detect_dump_ue(struct be_adapter *adapter)
@@ -2113,12 +2090,24 @@ static void be_msix_disable(struct be_adapter *adapter)
}
}
+static uint be_num_rss_want(struct be_adapter *adapter)
+{
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ adapter->num_vfs == 0 && be_physfn(adapter) &&
+ !be_is_mc(adapter))
+ return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ else
+ return 0;
+}
+
static void be_msix_enable(struct be_adapter *adapter)
{
-#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
+#define BE_MIN_MSIX_VECTORS 1
int i, status, num_vec;
- num_vec = be_num_rxqs_want(adapter) + 1;
+ /* If RSS queues are not used, need a vec for default RX Q */
+ num_vec = min(be_num_rss_want(adapter), num_online_cpus());
+ num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
@@ -2186,60 +2175,31 @@ static void be_sriov_disable(struct be_adapter *adapter)
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj)
+ struct be_eq_obj *eqo)
{
- return adapter->msix_entries[eq_obj->eq_idx].vector;
-}
-
-static int be_request_irq(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj,
- void *handler, char *desc, void *context)
-{
- struct net_device *netdev = adapter->netdev;
- int vec;
-
- sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
- vec = be_msix_vec_get(adapter, eq_obj);
- return request_irq(vec, handler, 0, eq_obj->desc, context);
-}
-
-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
- void *context)
-{
- int vec = be_msix_vec_get(adapter, eq_obj);
- free_irq(vec, context);
+ return adapter->msix_entries[eqo->idx].vector;
}
static int be_msix_register(struct be_adapter *adapter)
{
- struct be_rx_obj *rxo;
- int status, i;
- char qname[10];
-
- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
- adapter);
- if (status)
- goto err;
+ struct net_device *netdev = adapter->netdev;
+ struct be_eq_obj *eqo;
+ int status, i, vec;
- for_all_rx_queues(adapter, rxo, i) {
- sprintf(qname, "rxq%d", i);
- status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
- qname, rxo);
+ for_all_evt_queues(adapter, eqo, i) {
+ sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+ vec = be_msix_vec_get(adapter, eqo);
+ status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
if (status)
goto err_msix;
}
return 0;
-
err_msix:
- be_free_irq(adapter, &adapter->tx_eq, adapter);
-
- for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
- be_free_irq(adapter, &rxo->rx_eq, rxo);
-
-err:
- dev_warn(&adapter->pdev->dev,
- "MSIX Request IRQ failed - err %d\n", status);
+ for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
+ free_irq(be_msix_vec_get(adapter, eqo), eqo);
+ dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
+ status);
be_msix_disable(adapter);
return status;
}
@@ -2275,7 +2235,7 @@ done:
static void be_irq_unregister(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
if (!adapter->isr_registered)
@@ -2288,16 +2248,14 @@ static void be_irq_unregister(struct be_adapter *adapter)
}
/* MSIx */
- be_free_irq(adapter, &adapter->tx_eq, adapter);
-
- for_all_rx_queues(adapter, rxo, i)
- be_free_irq(adapter, &rxo->rx_eq, rxo);
+ for_all_evt_queues(adapter, eqo, i)
+ free_irq(be_msix_vec_get(adapter, eqo), eqo);
done:
adapter->isr_registered = false;
}
-static void be_rx_queues_clear(struct be_adapter *adapter)
+static void be_rx_qs_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
struct be_rx_obj *rxo;
@@ -2312,76 +2270,67 @@ static void be_rx_queues_clear(struct be_adapter *adapter)
* arrive
*/
mdelay(1);
- be_rx_q_clean(adapter, rxo);
+ be_rx_cq_clean(rxo);
}
-
- /* Clear any residual events */
- q = &rxo->rx_eq.q;
- if (q->created)
- be_eq_clean(adapter, &rxo->rx_eq);
+ be_queue_free(adapter, q);
}
}
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
- struct be_tx_obj *txo;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- int vec, i;
+ struct be_eq_obj *eqo;
+ int i;
be_async_mcc_disable(adapter);
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
- for_all_rx_queues(adapter, rxo, i)
- napi_disable(&rxo->rx_eq.napi);
-
- napi_disable(&tx_eq->napi);
-
- if (lancer_chip(adapter)) {
- be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
- for_all_rx_queues(adapter, rxo, i)
- be_cq_notify(adapter, rxo->cq.id, false, 0);
- for_all_tx_queues(adapter, txo, i)
- be_cq_notify(adapter, txo->cq.id, false, 0);
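+ /* quiesce each EQ: stop NAPI, wait for in-flight irqs, drain residual events */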
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_disable(&eqo->napi);
+ if (msix_enabled(adapter))
+ synchronize_irq(be_msix_vec_get(adapter, eqo));
+ else
+ synchronize_irq(netdev->irq);
+ be_eq_clean(eqo);
}
- if (msix_enabled(adapter)) {
- vec = be_msix_vec_get(adapter, tx_eq);
- synchronize_irq(vec);
-
- for_all_rx_queues(adapter, rxo, i) {
- vec = be_msix_vec_get(adapter, &rxo->rx_eq);
- synchronize_irq(vec);
- }
- } else {
- synchronize_irq(netdev->irq);
- }
be_irq_unregister(adapter);
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
- for_all_tx_queues(adapter, txo, i)
- be_tx_compl_clean(adapter, txo);
+ be_tx_compl_clean(adapter);
- be_rx_queues_clear(adapter);
+ be_rx_qs_destroy(adapter);
return 0;
}
-static int be_rx_queues_setup(struct be_adapter *adapter)
+static int be_rx_qs_create(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
int rc, i, j;
u8 rsstable[128];
for_all_rx_queues(adapter, rxo, i) {
+ rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
+ sizeof(struct be_eth_rx_d));
+ if (rc)
+ return rc;
+ }
+
+ /* The FW would like the default RXQ to be created first */
+ rxo = default_rxo(adapter);
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
+ adapter->if_handle, false, &rxo->rss_id);
+ if (rc)
+ return rc;
+
+ for_all_rss_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
- rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
- adapter->if_handle,
- (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
+ rx_frag_size, adapter->if_handle,
+ true, &rxo->rss_id);
if (rc)
return rc;
}
@@ -2395,48 +2344,47 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
}
}
rc = be_cmd_rss_config(adapter, rsstable, 128);
-
if (rc)
return rc;
}
/* First time posting */
- for_all_rx_queues(adapter, rxo, i) {
+ for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
- napi_enable(&rxo->rx_eq.napi);
- }
return 0;
}
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ struct be_eq_obj *eqo;
struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
u8 link_status;
int status, i;
- status = be_rx_queues_setup(adapter);
+ status = be_rx_qs_create(adapter);
if (status)
goto err;
- napi_enable(&tx_eq->napi);
-
be_irq_register(adapter);
if (!lancer_chip(adapter))
be_intr_set(adapter, true);
- /* The evt queues are created in unarmed state; arm them */
- for_all_rx_queues(adapter, rxo, i) {
- be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
+ for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, true, 0);
- }
- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
- /* Now that interrupts are on we can process async mcc */
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, true, 0);
+
be_async_mcc_enable(adapter);
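+ /* enable NAPI and arm the event queues */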
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_enable(&eqo->napi);
+ be_eq_notify(adapter, eqo->q.id, true, false, 0);
+ }
+
status = be_cmd_link_status_query(adapter, NULL, NULL,
&link_status, 0);
if (!status)
@@ -2540,17 +2488,32 @@ static void be_vf_clear(struct be_adapter *adapter)
static int be_clear(struct be_adapter *adapter)
{
+ int i = 1;
+
+ if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
+ cancel_delayed_work_sync(&adapter->work);
+ adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
+ }
+
if (sriov_enabled(adapter))
be_vf_clear(adapter);
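+ /* delete the extra unicast MACs; pmac_id[0] holds the primary MAC */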
+ for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
be_mcc_queues_destroy(adapter);
- be_rx_queues_destroy(adapter);
+ be_rx_cqs_destroy(adapter);
be_tx_queues_destroy(adapter);
+ be_evt_queues_destroy(adapter);
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
+
+ be_msix_disable(adapter);
+ kfree(adapter->pmac_id);
return 0;
}
@@ -2569,7 +2532,7 @@ static int be_vf_setup(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
- u16 lnk_speed;
+ u16 def_vlan, lnk_speed;
int status;
be_vf_setup_init(adapter);
@@ -2593,6 +2556,12 @@ static int be_vf_setup(struct be_adapter *adapter)
if (status)
goto err;
vf_cfg->tx_rate = lnk_speed * 10;
+
+ status = be_cmd_get_hsw_config(adapter, &def_vlan,
+ vf + 1, vf_cfg->if_handle);
+ if (status)
+ goto err;
+ vf_cfg->def_vid = def_vlan;
}
return 0;
err:
@@ -2609,19 +2578,28 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->eq_next_idx = 0;
}
-static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
u32 pmac_id;
- int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
- if (status != 0)
- goto do_none;
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK,
- false, adapter->if_handle, pmac_id);
+ int status;
+ bool pmac_id_active;
+
+ status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
+ &pmac_id, mac);
if (status != 0)
goto do_none;
- status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id, 0);
+
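+ /* MAC already provisioned in FW (pmac_id_active): query it; otherwise add it */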
+ if (pmac_id_active) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, adapter->if_handle, pmac_id);
+
+ if (!status)
+ adapter->pmac_id[0] = pmac_id;
+ } else {
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->if_handle, &adapter->pmac_id[0], 0);
+ }
do_none:
return status;
}
@@ -2631,24 +2609,29 @@ static int be_setup(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
- int status, i;
+ int status;
u8 mac[ETH_ALEN];
- struct be_tx_obj *txo;
be_setup_init(adapter);
be_cmd_req_native_mode(adapter);
- status = be_tx_queues_create(adapter);
- if (status != 0)
+ be_msix_enable(adapter);
+
+ status = be_evt_queues_create(adapter);
+ if (status)
goto err;
- status = be_rx_queues_create(adapter);
- if (status != 0)
+ status = be_tx_cqs_create(adapter);
+ if (status)
+ goto err;
+
+ status = be_rx_cqs_create(adapter);
+ if (status)
goto err;
status = be_mcc_queues_create(adapter);
- if (status != 0)
+ if (status)
goto err;
memset(mac, 0, ETH_ALEN);
@@ -2670,23 +2653,17 @@ static int be_setup(struct be_adapter *adapter)
}
status = be_cmd_if_create(adapter, cap_flags, en_flags,
netdev->dev_addr, &adapter->if_handle,
- &adapter->pmac_id, 0);
+ &adapter->pmac_id[0], 0);
if (status != 0)
goto err;
- for_all_tx_queues(adapter, txo, i) {
- status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
- if (status)
- goto err;
- }
-
/* The VF's permanent mac queried from card is incorrect.
* For BEx: Query the mac configured by the PF using if_handle
* For Lancer: Get and use mac_list to obtain mac address.
*/
if (!be_physfn(adapter)) {
if (lancer_chip(adapter))
- status = be_configure_mac_from_list(adapter, mac);
+ status = be_add_mac_from_list(adapter, mac);
else
status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, false,
@@ -2697,6 +2674,10 @@ static int be_setup(struct be_adapter *adapter)
}
}
+ status = be_tx_qs_create(adapter);
+ if (status)
+ goto err;
+
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
status = be_vid_config(adapter, false, 0);
@@ -2726,6 +2707,9 @@ static int be_setup(struct be_adapter *adapter)
goto err;
}
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+
return 0;
err:
be_clear(adapter);
@@ -2736,12 +2720,13 @@ err:
static void be_netpoll(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
- event_handle(adapter, &adapter->tx_eq, false);
- for_all_rx_queues(adapter, rxo, i)
- event_handle(adapter, &rxo->rx_eq, true);
+ for_all_evt_queues(adapter, eqo, i)
+ event_handle(eqo);
+
+ return;
}
#endif
@@ -3102,7 +3087,7 @@ static const struct net_device_ops be_netdev_ops = {
static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -3117,20 +3102,18 @@ static void be_netdev_init(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->flags |= IFF_MULTICAST;
netif_set_gso_max_size(netdev, 65535);
- BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
+ netdev->netdev_ops = &be_netdev_ops;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
- for_all_rx_queues(adapter, rxo, i)
- netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
- BE_NAPI_WEIGHT);
-
- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
- BE_NAPI_WEIGHT);
+ for_all_evt_queues(adapter, eqo, i)
+ netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -3289,8 +3272,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
if (!adapter)
return;
- cancel_delayed_work_sync(&adapter->work);
-
unregister_netdev(adapter->netdev);
be_clear(adapter);
@@ -3301,8 +3282,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_sriov_disable(adapter);
- be_msix_disable(adapter);
-
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -3310,6 +3289,12 @@ static void __devexit be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
+bool be_is_wol_supported(struct be_adapter *adapter)
+{
+ return ((adapter->wol_cap & BE_WOL_CAP) &&
+ !be_is_wol_excluded(adapter)) ? true : false;
+}
+
static int be_get_config(struct be_adapter *adapter)
{
int status;
@@ -3320,14 +3305,36 @@ static int be_get_config(struct be_adapter *adapter)
return status;
if (adapter->function_mode & FLEX10_MODE)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
else
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ if (be_physfn(adapter))
+ adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
+ else
+ adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
+
+ /* primary mac needs 1 pmac entry */
+ adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
+ status = be_cmd_get_acpi_wol_cap(adapter);
+ if (status) {
+ /* in case of a failure to get wol capabilities
+ * check the exclusion list to determine WOL capability */
+ if (!be_is_wol_excluded(adapter))
+ adapter->wol_cap |= BE_WOL_CAP;
+ }
+
+ if (be_is_wol_supported(adapter))
+ adapter->wol = true;
+
return 0;
}
@@ -3469,6 +3476,7 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo;
+ struct be_eq_obj *eqo;
int i;
if (lancer_chip(adapter))
@@ -3479,15 +3487,7 @@ static void be_worker(struct work_struct *work)
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
- int mcc_compl, status = 0;
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
- }
-
+ be_process_mcc(adapter);
goto reschedule;
}
@@ -3500,14 +3500,15 @@ static void be_worker(struct work_struct *work)
}
for_all_rx_queues(adapter, rxo, i) {
- be_rx_eqd_update(adapter, rxo);
-
if (rxo->rx_post_starved) {
rxo->rx_post_starved = false;
be_post_rx_frags(rxo, GFP_KERNEL);
}
}
+ for_all_evt_queues(adapter, eqo, i)
+ be_eqd_update(adapter, eqo);
+
reschedule:
adapter->work_counter++;
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -3593,6 +3594,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto ctrl_clean;
+ /* The INTR bit may be set in the card when probed by a kdump kernel
+ * after a crash.
+ */
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
status = be_stats_init(adapter);
if (status)
goto ctrl_clean;
@@ -3601,14 +3608,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto stats_clean;
- /* The INTR bit may be set in the card when probed by a kdump kernel
- * after a crash.
- */
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
-
- be_msix_enable(adapter);
-
INIT_DELAYED_WORK(&adapter->work, be_worker);
adapter->rx_fc = adapter->tx_fc = true;
@@ -3621,9 +3620,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status != 0)
goto unsetup;
- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+ dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
+ adapter->port_num);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:
@@ -3653,7 +3652,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- cancel_delayed_work_sync(&adapter->work);
if (adapter->wol)
be_setup_wol(adapter, true);
@@ -3665,7 +3663,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
}
be_clear(adapter);
- be_msix_disable(adapter);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3687,7 +3684,6 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
- be_msix_enable(adapter);
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
@@ -3704,7 +3700,6 @@ static int be_resume(struct pci_dev *pdev)
if (adapter->wol)
be_setup_wol(adapter, false);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
}