author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-18 02:26:30 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-18 02:26:30 +0300
commit     a7fd20d1c476af4563e66865213474a2f9f473a4
tree       fb1399e2f82842450245fb058a8fb23c52865f43 /drivers/net/ethernet/ibm
parent     b80fed9595513384424cd141923c9161c4b5021b
parent     917fa5353da05e8a0045b8acacba8d50400d5b12
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

   1) Support SPI based w5100 devices, from Akinobu Mita.

   2) Partial Segmentation Offload, from Alexander Duyck.

   3) Add GMAC4 support to stmmac driver, from Alexandre TORGUE.

   4) Allow cls_flower stats offload, from Amir Vadai.

   5) Implement bpf blinding, from Daniel Borkmann.

   6) Optimize _ASYNC_ bit twiddling on sockets, unless the socket is
      actually using FASYNC these atomics are superfluous.  From Eric
      Dumazet.

   7) Run TCP more preemptibly, also from Eric Dumazet.

   8) Support LED blinking, EEPROM dumps, and rxvlan offloading in mlx5e
      driver, from Gal Pressman.

   9) Allow creating ppp devices via rtnetlink, from Guillaume Nault.

  10) Improve BPF usage documentation, from Jesper Dangaard Brouer.

  11) Support tunneling offloads in qed, from Manish Chopra.

  12) aRFS offloading in mlx5e, from Maor Gottlieb.

  13) Add RFS and RPS support to SCTP protocol, from Marcelo Ricardo
      Leitner.

  14) Add MSG_EOR support to TCP, this allows controlling packet
      coalescing on application record boundaries for more accurate
      socket timestamp sampling.  From Martin KaFai Lau.

  15) Fix alignment of 64-bit netlink attributes across the board, from
      Nicolas Dichtel.

  16) Per-vlan stats in bridging, from Nikolay Aleksandrov.

  17) Several conversions of drivers to ethtool ksettings, from Philippe
      Reynes.

  18) Checksum neutral ILA in ipv6, from Tom Herbert.

  19) Factorize all of the various marvell dsa drivers into one, from
      Vivien Didelot

  20) Add VF support to qed driver, from Yuval Mintz"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1649 commits)
  Revert "phy dp83867: Fix compilation with CONFIG_OF_MDIO=m"
  Revert "phy dp83867: Make rgmii parameters optional"
  r8169: default to 64-bit DMA on recent PCIe chips
  phy dp83867: Make rgmii parameters optional
  phy dp83867: Fix compilation with CONFIG_OF_MDIO=m
  bpf: arm64: remove callee-save registers use for tmp registers
  asix: Fix offset calculation in asix_rx_fixup() causing slow transmissions
  switchdev: pass pointer to fib_info instead of copy
  net_sched: close another race condition in tcf_mirred_release()
  tipc: fix nametable publication field in nl compat
  drivers: net: Don't print unpopulated net_device name
  qed: add support for dcbx.
  ravb: Add missing free_irq() calls to ravb_close()
  qed: Remove a stray tab
  net: ethernet: fec-mpc52xx: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fec-mpc52xx: use phydev from struct net_device
  bpf, doc: fix typo on bpf_asm descriptions
  stmmac: hardware TX COE doesn't work when force_thresh_dma_mode is set
  net: ethernet: fs-enet: use phy_ethtool_{get|set}_link_ksettings
  net: ethernet: fs-enet: use phydev from struct net_device
  ...
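As a usage sketch for highlight 14 (an assumption about the feature's intended use, not code from this merge): setting MSG_EOR on a TCP send marks an application record boundary, so the stack will not coalesce later writes into the same skb and per-record TX timestamps stay accurate.

#include <sys/socket.h>

/* Hypothetical helper: send one application record on a connected TCP
 * socket and mark its end so TCP does not merge the next send into it.
 */
static ssize_t send_record(int fd, const void *buf, size_t len)
{
	return send(fd, buf, len, MSG_EOR);
}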
Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c      4
-rw-r--r--  drivers/net/ethernet/ibm/emac/phy.c      26
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c      251
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h        4
4 files changed, 261 insertions, 24 deletions
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c01c46..4c9771d57d6e 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -301,7 +301,7 @@ static inline void emac_netif_stop(struct emac_instance *dev)
dev->no_mcast = 1;
netif_addr_unlock(dev->ndev);
netif_tx_unlock_bh(dev->ndev);
- dev->ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev->ndev); /* prevent tx timeout */
mal_poll_disable(dev->mal, &dev->commac);
netif_tx_disable(dev->ndev);
}
@@ -1377,7 +1377,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
DBG2(dev, "stopped TX queue" NL);
}
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
++dev->stats.tx_packets;
dev->stats.tx_bytes += len;
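Both hunks above replace the open-coded `dev->ndev->trans_start = jiffies` with the helper introduced during this cycle. For reference, a sketch of that helper follows; the timeout timestamp now lives on the per-queue structure, so it stamps TX queue 0.

/* Close approximation of the new helper in include/linux/netdevice.h
 * (assumed from the surrounding conversion, not quoted from the tree).
 */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}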
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index d3b9d103353e..5b88cc690c22 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -470,12 +470,38 @@ static struct mii_phy_def m88e1112_phy_def = {
.ops = &m88e1112_phy_ops,
};
+static int ar8035_init(struct mii_phy *phy)
+{
+ phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */
+ phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */
+ phy_write(phy, 0x1d, 0xb); /* Address hib ctrl */
+ phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */
+
+ return 0;
+}
+
+static struct mii_phy_ops ar8035_phy_ops = {
+ .init = ar8035_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link,
+};
+
+static struct mii_phy_def ar8035_phy_def = {
+ .phy_id = 0x004dd070,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Atheros 8035 Gigabit Ethernet",
+ .ops = &ar8035_phy_ops,
+};
+
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
&m88e1111_phy_def,
&m88e1112_phy_def,
+ &ar8035_phy_def,
&genmii_phy_def,
NULL
};
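The ar8035_init() sequence above drives Atheros vendor "debug" registers through an address/data window: a write to MII register 0x1d selects a debug register, and 0x1e carries its data. A hypothetical read helper against the same window, reusing this file's phy_read()/phy_write() accessors (illustrative only, not part of the patch):

/* Hypothetical: fetch the current value of an AR8035 debug register
 * through the 0x1d (address) / 0x1e (data) window.
 */
static int ar8035_read_debug(struct mii_phy *phy, int dbg_reg)
{
	phy_write(phy, 0x1d, dbg_reg);	/* select the debug register */
	return phy_read(phy, 0x1e);	/* read back its value */
}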
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16eee5d0..864cb21351a4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -61,6 +61,7 @@
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
@@ -94,6 +95,7 @@ static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
@@ -561,10 +563,141 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field - bitfield determining needed headers
+ * @skb - socket buffer
+ * @hdr_len - array of header lengths
+ * @tot_len - total length of data
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers. Saves individual header
+ * lengths and total buffer length to be used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len, u8 *hdr_data)
+{
+ int len = 0;
+ u8 *hdr;
+
+ hdr_len[0] = sizeof(struct ethhdr);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ hdr_len[1] = ip_hdr(skb)->ihl * 4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hdr_len[1] = sizeof(struct ipv6hdr);
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ }
+
+ memset(hdr_data, 0, 120);
+ if ((hdr_field >> 6) & 1) {
+ hdr = skb_mac_header(skb);
+ memcpy(hdr_data, hdr, hdr_len[0]);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr = skb_network_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[1]);
+ len += hdr_len[1];
+ }
+
+ if ((hdr_field >> 4) & 1) {
+ hdr = skb_transport_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[2]);
+ len += hdr_len[2];
+ }
+ return len;
+}
+
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field - bitfield determining needed headers
+ * @data - buffer containing header data
+ * @len - length of data buffer
+ * @hdr_len - array of individual header lengths
+ * @scrq_arr - descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in a descriptor array, scrq_arr
+ */
+
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+ union sub_crq *scrq_arr)
+{
+ union sub_crq hdr_desc;
+ int tmp_len = len;
+ u8 *data, *cur;
+ int tmp;
+
+ while (tmp_len > 0) {
+ cur = hdr_data + len - tmp_len;
+
+ memset(&hdr_desc, 0, sizeof(hdr_desc));
+ if (cur != hdr_data) {
+ data = hdr_desc.hdr_ext.data;
+ tmp = tmp_len > 29 ? 29 : tmp_len;
+ hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc.hdr_ext.len = tmp;
+ } else {
+ data = hdr_desc.hdr.data;
+ tmp = tmp_len > 24 ? 24 : tmp_len;
+ hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc.hdr.len = tmp;
+ hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc.hdr.flag = hdr_field << 1;
+ }
+ memcpy(data, cur, tmp);
+ tmp_len -= tmp;
+ *scrq_arr = hdr_desc;
+ scrq_arr++;
+ }
+}
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @skb - socket buffer
+ * @num_entries - number of descriptors to be sent
+ * @subcrq - first TX descriptor
+ * @hdr_field - bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+ int *num_entries, u8 hdr_field)
+{
+ int hdr_len[3] = {0, 0, 0};
+ int tot_len, len;
+ u8 *hdr_data = txbuff->hdr_data;
+
+ tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+ txbuff->hdr_data);
+ len = tot_len;
+ len -= 24;
+ if (len > 0)
+ num_entries += len % 29 ? len / 29 + 1 : len / 29;
+ create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+ txbuff->indir_arr + 1);
+}
+
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
+ u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +712,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
+ int num_entries = 1;
unsigned char *dst;
u64 *handle_array;
int index = 0;
@@ -644,11 +778,35 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL)
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
- lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+ hdrs += 2;
+ }
+ /* determine if l2/3/4 headers are sent to firmware */
+ if ((*hdrs >> 7) & 1 &&
+ (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6))) {
+ build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+ tx_crq.v1.n_crq_elem = num_entries;
+ tx_buff->indir_arr[0] = tx_crq;
+ tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+ sizeof(tx_buff->indir_arr),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "tx: unable to map descriptor array\n");
+ tx_map_failed++;
+ tx_dropped++;
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+ lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ (u64)tx_buff->indir_dma,
+ (u64)num_entries);
+ } else {
+ lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ &tx_crq);
+ }
if (lpar_rc != H_SUCCESS) {
dev_err(dev, "tx failed with code %ld\n", lpar_rc);
@@ -832,7 +990,7 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- replenish_pools(adapter);
+ replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1159,6 +1317,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
union sub_crq *next;
int index;
int i, j;
+ u8 first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1340,13 @@ restart_loop:
txbuff->data_dma[j] = 0;
txbuff->used_bounce = false;
}
+ /* if sub_crq was sent indirectly */
+ first = txbuff->indir_arr[0].generic.first;
+ if (first == IBMVNIC_CRQ_CMD) {
+ dma_unmap_single(dev, txbuff->indir_dma,
+ sizeof(txbuff->indir_arr),
+ DMA_TO_DEVICE);
+ }
if (txbuff->last_frag)
dev_kfree_skb_any(txbuff->skb);
@@ -1261,9 +1427,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
entries_page : adapter->max_rx_add_entries_per_subcrq;
/* Choosing the maximum number of queues supported by firmware*/
- adapter->req_tx_queues = adapter->min_tx_queues;
- adapter->req_rx_queues = adapter->min_rx_queues;
- adapter->req_rx_add_queues = adapter->min_rx_add_queues;
+ adapter->req_tx_queues = adapter->max_tx_queues;
+ adapter->req_rx_queues = adapter->max_rx_queues;
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
adapter->req_mtu = adapter->max_mtu;
}
@@ -1494,6 +1660,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
return rc;
}
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 ioba, u64 num_entries)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ mb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+ cpu_to_be64(remote_handle),
+ ioba, num_entries);
+
+ if (rc) {
+ if (rc == H_CLOSED)
+ dev_warn(dev, "CRQ Queue closed\n");
+ dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+ }
+
+ return rc;
+}
+
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
union ibmvnic_crq *crq)
{
@@ -1589,13 +1777,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
goto buf_map_failed;
}
- rsp_buffer_size =
- sizeof(struct ibmvnic_login_rsp_buffer) +
- sizeof(u64) * (adapter->req_tx_queues +
- adapter->req_rx_queues *
- adapter->req_rx_add_queues + adapter->
- req_rx_add_queues) +
- sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
+ rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
+ sizeof(u64) * adapter->req_tx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
if (!login_rsp_buffer)
@@ -1918,6 +2104,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ adapter->netdev->features |= NETIF_F_RXCSUM;
+
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -2210,6 +2400,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
dma_unmap_single(dev, adapter->login_rsp_buf_token,
adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+ /* If the number of queues requested can't be allocated by the
+ * server, the login response will return with code 1. We will need
+ * to resend the login buffer with fewer queues requested.
+ */
+ if (login_rsp_crq->generic.rc.code) {
+ adapter->renegotiate = true;
+ complete(&adapter->init_done);
+ return 0;
+ }
+
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
@@ -3437,14 +3637,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->init_done);
wait_for_completion(&adapter->init_done);
- /* needed to pull init_sub_crqs outside of an interrupt context
- * because it creates IRQ mappings for the subCRQ queues, causing
- * a kernel warning
- */
- init_sub_crqs(adapter, 0);
+ do {
+ adapter->renegotiate = false;
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ init_sub_crqs(adapter, 0);
+ reinit_completion(&adapter->init_done);
+ wait_for_completion(&adapter->init_done);
+
+ if (adapter->renegotiate) {
+ release_sub_crqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ wait_for_completion(&adapter->init_done);
+ }
+ } while (adapter->renegotiate);
/* if init_sub_crqs is partially successful, retry */
while (!adapter->tx_scrq || !adapter->rx_scrq) {
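The header-descriptor bookkeeping in this diff follows from the descriptor formats: the first header descriptor carries up to 24 bytes of packed header data and each header-extension descriptor up to 29 more, matching the constants in create_hdr_descs() and build_hdr_descs_arr(). An illustrative standalone version of the entry count for the headers alone (the TX descriptor in indir_arr[0] is counted separately; note that the build_hdr_descs_arr() hunk appears to add this quantity to the pointer num_entries rather than to *num_entries):

/* Illustrative only: sub-CRQ entries needed to carry tot_len bytes of
 * packed L2/L3/L4 headers; one 24-byte header descriptor plus as many
 * 29-byte header-extension descriptors as the remainder requires.
 */
static int hdr_desc_entries(int tot_len)
{
	int rem = tot_len - 24;		/* bytes beyond the first descriptor */

	if (rem <= 0)
		return 1;
	return 1 + (rem + 28) / 29;	/* round up to whole extensions */
}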
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1a9993cc79b5..0b66a506a4e4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -879,6 +879,9 @@ struct ibmvnic_tx_buff {
int pool_index;
bool last_frag;
bool used_bounce;
+ union sub_crq indir_arr[6];
+ u8 hdr_data[140];
+ dma_addr_t indir_dma;
};
struct ibmvnic_tx_pool {
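These bounds line up with the header-descriptor math in ibmvnic.c: hdr_data can hold at most 24 + 4*29 = 140 bytes of packed L2/L3/L4 headers, and indir_arr tops out at six entries (the TX descriptor, one header descriptor, and up to four header extensions).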
@@ -977,6 +980,7 @@ struct ibmvnic_adapter {
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
int requested_caps;
+ bool renegotiate;
/* rx structs */
struct napi_struct *napi;