Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c |  27
-rw-r--r--  drivers/net/ethernet/ibm/emac/emac.h      |   2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c        |  54
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c        | 280
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h        |   6
5 files changed, 267 insertions(+), 102 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ea55314b209d..d5df131b183c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2618,10 +2618,8 @@ static int ehea_restart_qps(struct net_device *dev)
u16 dummy16 = 0;
cb0 = (void *)get_zeroed_page(GFP_KERNEL);
- if (!cb0) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!cb0)
+ return -ENOMEM;
for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
@@ -2641,6 +2639,7 @@ static int ehea_restart_qps(struct net_device *dev)
cb0);
if (hret != H_SUCCESS) {
netdev_err(dev, "query_ehea_qp failed (1)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2653,6 +2652,7 @@ static int ehea_restart_qps(struct net_device *dev)
&dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
netdev_err(dev, "modify_ehea_qp failed (1)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2661,6 +2661,7 @@ static int ehea_restart_qps(struct net_device *dev)
cb0);
if (hret != H_SUCCESS) {
netdev_err(dev, "query_ehea_qp failed (2)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2867,14 +2868,14 @@ out:
return ret;
}
-static ssize_t ehea_show_port_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t log_port_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
return sprintf(buf, "%d", port->logical_port_id);
}
-static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);
+static DEVICE_ATTR_RO(log_port_id);
static void logical_port_release(struct device *dev)
{
@@ -3113,7 +3114,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
return NULL;
}
-static ssize_t ehea_probe_port(struct device *dev,
+static ssize_t probe_port_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3168,9 +3169,9 @@ static ssize_t ehea_probe_port(struct device *dev,
return (ssize_t) count;
}
-static ssize_t ehea_remove_port(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t remove_port_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ehea_adapter *adapter = dev_get_drvdata(dev);
struct ehea_port *port;
@@ -3203,8 +3204,8 @@ static ssize_t ehea_remove_port(struct device *dev,
return (ssize_t) count;
}
-static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
-static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);
+static DEVICE_ATTR_WO(probe_port);
+static DEVICE_ATTR_WO(remove_port);
static int ehea_create_device_sysfs(struct platform_device *dev)
{
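The sysfs rename above is driven by the DEVICE_ATTR_RO()/DEVICE_ATTR_WO() helpers, which derive the show/store callback names from the attribute name. A rough sketch of the convention (simplified, not the exact kernel macro definitions):

    /* DEVICE_ATTR_RO(foo) wires up a 0444 attribute whose read handler
     * must be called foo_show(); DEVICE_ATTR_WO(foo) expects foo_store().
     * Approximately:
     */
    #define MY_DEVICE_ATTR_RO(_name)                                  \
            struct device_attribute dev_attr_##_name = {              \
                    .attr = { .name = #_name, .mode = 0444 },         \
                    .show = _name##_show,                             \
            }

    /* Hence ehea_show_port_id is renamed log_port_id_show so that
     * DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL) can
     * collapse to DEVICE_ATTR_RO(log_port_id).
     */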
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index aa9f651288d5..09d3ac374b2d 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -77,7 +77,7 @@ struct emac_regs {
struct {
u32 rsvd1;
u32 revid;
- u32 rsvd2[2];
+ u32 rsvd2[2];
u32 iaht1; /* Reset, R */
u32 iaht2; /* Reset, R */
u32 iaht3; /* Reset, R */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 7fea9ae60f13..737ba85e409f 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1285,36 +1285,41 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
iph_proto = iph6->nexthdr;
}
- /* In OVS environment, when a flow is not cached, specifically for a
- * new TCP connection, the first packet information is passed up
+ /* When CSO is enabled the TCP checksum may have been set to NULL by
+ * the sender, given that we zeroed out the TCP checksum field in the
+ * transmit path (refer to the ibmveth_start_xmit routine). In this case set
+ * up CHECKSUM_PARTIAL. If the packet is forwarded, the checksum will
+ * then be recalculated by the destination NIC (CSO must be enabled
+ * on the destination NIC).
+ *
+ * In an OVS environment, when a flow is not cached, specifically for a
+ * new TCP connection, the first packet information is passed up to
* the user space for finding a flow. During this process, OVS computes
* checksum on the first packet when CHECKSUM_PARTIAL flag is set.
*
- * Given that we zeroed out TCP checksum field in transmit path
- * (refer ibmveth_start_xmit routine) as we set "no checksum bit",
- * OVS computed checksum will be incorrect w/o TCP pseudo checksum
- * in the packet. This leads to OVS dropping the packet and hence
- * TCP retransmissions are seen.
- *
- * So, re-compute TCP pseudo header checksum.
+ * So, re-compute TCP pseudo header checksum when configured for
+ * trunk mode.
*/
- if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+ if (iph_proto == IPPROTO_TCP) {
struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
-
- tcphdrlen = skb->len - iphlen;
-
- /* Recompute TCP pseudo header checksum */
- if (skb_proto == ETH_P_IP)
- tcph->check = ~csum_tcpudp_magic(iph->saddr,
+ if (tcph->check == 0x0000) {
+ /* Recompute TCP pseudo header checksum */
+ if (adapter->is_active_trunk) {
+ tcphdrlen = skb->len - iphlen;
+ if (skb_proto == ETH_P_IP)
+ tcph->check =
+ ~csum_tcpudp_magic(iph->saddr,
iph->daddr, tcphdrlen, iph_proto, 0);
- else if (skb_proto == ETH_P_IPV6)
- tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check =
+ ~csum_ipv6_magic(&iph6->saddr,
&iph6->daddr, tcphdrlen, iph_proto, 0);
-
- /* Setup SKB fields for checksum offload */
- skb_partial_csum_set(skb, iphlen,
- offsetof(struct tcphdr, check));
- skb_reset_network_header(skb);
+ }
+ /* Setup SKB fields for checksum offload */
+ skb_partial_csum_set(skb, iphlen,
+ offsetof(struct tcphdr, check));
+ skb_reset_network_header(skb);
+ }
}
}
@@ -1799,8 +1804,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct ibmveth_buff_pool *pool = container_of(kobj,
struct ibmveth_buff_pool,
kobj);
- struct net_device *netdev = dev_get_drvdata(
- container_of(kobj->parent, struct device, kobj));
+ struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
long rc;
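The veth_pool_store hunk above swaps the open-coded container_of() for kobj_to_dev(), which is a typed wrapper around the same operation. A minimal sketch of the equivalence (the my_ helper is illustrative, not the kernel's definition):

    /* Map a sysfs kobject back to the struct device that embeds it. */
    static inline struct device *my_kobj_to_dev(struct kobject *kobj)
    {
            return container_of(kobj, struct device, kobj);
    }

    /* So the old
     *   dev_get_drvdata(container_of(kobj->parent, struct device, kobj))
     * and the new
     *   dev_get_drvdata(kobj_to_dev(kobj->parent))
     * resolve the same parent device.
     */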
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5788bb956d73..374a75d4faea 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -95,7 +95,7 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
-static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
+static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
@@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *tx_scrq);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -141,6 +143,29 @@ static const struct ibmvnic_stat ibmvnic_stats[] = {
{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
+static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
+ crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
+static int send_version_xchg(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.version_exchange.first = IBMVNIC_CRQ_CMD;
+ crq.version_exchange.cmd = VERSION_EXCHANGE;
+ crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
unsigned long length, unsigned long *number,
unsigned long *irq)
@@ -209,12 +234,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
mutex_lock(&adapter->fw_lock);
adapter->fw_done_rc = 0;
reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr,
- ltb->size, ltb->map_id);
+
+ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
if (rc) {
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- mutex_unlock(&adapter->fw_lock);
- return rc;
+ dev_err(dev, "send_request_map failed, rc = %d\n", rc);
+ goto out;
}
rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
@@ -222,20 +246,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
dev_err(dev,
"Long term map request aborted or timed out,rc = %d\n",
rc);
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- mutex_unlock(&adapter->fw_lock);
- return rc;
+ goto out;
}
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
+ rc = -1;
+ goto out;
+ }
+ rc = 0;
+out:
+ if (rc) {
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- mutex_unlock(&adapter->fw_lock);
- return -1;
+ ltb->buff = NULL;
}
mutex_unlock(&adapter->fw_lock);
- return 0;
+ return rc;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
@@ -255,14 +282,44 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ ltb->buff = NULL;
+ ltb->map_id = 0;
}
-static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb)
{
- if (!ltb->buff)
- return -EINVAL;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
memset(ltb->buff, 0, ltb->size);
+
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+
+ reinit_completion(&adapter->fw_done);
+ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_info(dev,
+ "Reset failed, long term map request timed out or aborted\n");
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ if (adapter->fw_done_rc) {
+ dev_info(dev,
+ "Reset failed, attempting to free and reallocate buffer\n");
+ free_long_term_buff(adapter, ltb);
+ mutex_unlock(&adapter->fw_lock);
+ return alloc_long_term_buff(adapter, ltb, ltb->size);
+ }
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -298,7 +355,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
rx_scrq = adapter->rx_scrq[pool->index];
ind_bufp = &rx_scrq->ind_buf;
- for (i = 0; i < count; ++i) {
+
+ /* netdev_alloc_skb() could have failed after we saved a few skbs
+ * in the indir_buf and we would not have sent them to VIOS yet.
+ * To account for them, start the loop at ind_bufp->index rather
+ * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
+ * be 0.
+ */
+ for (i = ind_bufp->index; i < count; ++i) {
skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
if (!skb) {
dev_err(dev, "Couldn't replenish rx buff\n");
@@ -484,7 +548,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
rx_pool->size *
rx_pool->buff_size);
} else {
- rc = reset_long_term_buff(&rx_pool->long_term_buff);
+ rc = reset_long_term_buff(adapter,
+ &rx_pool->long_term_buff);
}
if (rc)
@@ -607,11 +672,12 @@ static int init_rx_pools(struct net_device *netdev)
return 0;
}
-static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_tx_pool *tx_pool)
{
int rc, i;
- rc = reset_long_term_buff(&tx_pool->long_term_buff);
+ rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
if (rc)
return rc;
@@ -638,10 +704,11 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
- rc = reset_one_tx_pool(&adapter->tso_pool[i]);
+ ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
+ rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
if (rc)
return rc;
- rc = reset_one_tx_pool(&adapter->tx_pool[i]);
+ rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
if (rc)
return rc;
}
@@ -734,8 +801,11 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tso_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
- if (!adapter->tso_pool)
+ if (!adapter->tso_pool) {
+ kfree(adapter->tx_pool);
+ adapter->tx_pool = NULL;
return -1;
+ }
adapter->num_active_tx_pools = tx_subcrqs;
@@ -846,9 +916,10 @@ static const char *adapter_state_to_string(enum vnic_state state)
return "REMOVING";
case VNIC_REMOVED:
return "REMOVED";
- default:
- return "UNKNOWN";
+ case VNIC_DOWN:
+ return "DOWN";
}
+ return "UNKNOWN";
}
static int ibmvnic_login(struct net_device *netdev)
@@ -1180,6 +1251,11 @@ static int __ibmvnic_open(struct net_device *netdev)
netif_tx_start_all_queues(netdev);
+ if (prev_state == VNIC_CLOSED) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_schedule(&adapter->napi[i]);
+ }
+
adapter->state = VNIC_OPEN;
return rc;
}
@@ -1502,7 +1578,8 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @txbuff: tx buffer
+ * @skb: tx socket buffer
+ * @indir_arr: indirect array
* @num_entries: number of descriptors to be sent
* @hdr_field: bit field determining which headers will be sent
*
@@ -1583,7 +1660,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
ind_bufp->index = 0;
if (atomic_sub_return(entries, &tx_scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
- __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ __netif_subqueue_stopped(adapter->netdev, queue_num) &&
+ !test_bit(0, &adapter->resetting)) {
netif_wake_subqueue(adapter->netdev, queue_num);
netdev_dbg(adapter->netdev, "Started queue %d\n",
queue_num);
@@ -1676,7 +1754,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1946,9 +2023,10 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
return "TIMEOUT";
case VNIC_RESET_CHANGE_PARAM:
return "CHANGE_PARAM";
- default:
- return "UNKNOWN";
+ case VNIC_RESET_PASSIVE_INIT:
+ return "PASSIVE_INIT";
}
+ return "UNKNOWN";
}
/*
@@ -2085,10 +2163,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- /* If the adapter was in PROBE state prior to the reset,
+ /* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
*/
- if (reset_state == VNIC_PROBED) {
+ if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
rc = 0;
goto out;
}
@@ -2214,10 +2292,10 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
if (rc)
goto out;
- /* If the adapter was in PROBE state prior to the reset,
+ /* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
*/
- if (reset_state == VNIC_PROBED)
+ if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
goto out;
rc = ibmvnic_login(netdev);
@@ -2270,6 +2348,76 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
return rwi;
}
+/**
+ * do_passive_init - complete probing when partner device is detected.
+ * @adapter: ibmvnic_adapter struct
+ *
+ * If the ibmvnic device does not have a partner device to communicate with at boot
+ * and that partner device comes online at a later time, this function is called
+ * to complete the initialization process of the ibmvnic device.
+ * Caller is expected to hold rtnl_lock().
+ *
+ * Returns non-zero if sub-CRQs are not initialized properly leaving the device
+ * in the down state.
+ * Returns 0 upon success and the device is in PROBED state.
+ */
+
+static int do_passive_init(struct ibmvnic_adapter *adapter)
+{
+ unsigned long timeout = msecs_to_jiffies(30000);
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ netdev_dbg(netdev, "Partner device found, probing.\n");
+
+ adapter->state = VNIC_PROBING;
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+ adapter->crq.active = true;
+
+ rc = send_crq_init_complete(adapter);
+ if (rc)
+ goto out;
+
+ rc = send_version_xchg(adapter);
+ if (rc)
+ netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
+
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Initialization sequence timed out\n");
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
+ goto init_failed;
+ }
+
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
+
+ adapter->state = VNIC_PROBED;
+ netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
+
+ return 0;
+
+init_failed:
+ release_sub_crqs(adapter, 1);
+out:
+ adapter->state = VNIC_DOWN;
+ return rc;
+}
+
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
@@ -2306,7 +2454,13 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (adapter->force_reset_recovery) {
+ if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
+ rtnl_lock();
+ rc = do_passive_init(adapter);
+ rtnl_unlock();
+ if (!rc)
+ netif_carrier_on(adapter->netdev);
+ } else if (adapter->force_reset_recovery) {
/* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
* future MOBILITY or other resets.
@@ -2402,8 +2556,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- list_for_each(entry, &adapter->rwi_list) {
- tmp = list_entry(entry, struct ibmvnic_rwi, list);
+ list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
reset_reason_to_string(reason));
@@ -3140,6 +3293,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
i);
+ ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
if (adapter->tx_scrq[i]->irq) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
@@ -3213,7 +3367,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
/* H_EOI would fail with rc = H_FUNCTION when running
* in XIVE mode which is expected, but not an error.
*/
- if (rc && rc != H_FUNCTION)
+ if (rc && (rc != H_FUNCTION))
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
}
@@ -3776,18 +3930,6 @@ static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
return 0;
}
-static int send_version_xchg(struct ibmvnic_adapter *adapter)
-{
- union ibmvnic_crq crq;
-
- memset(&crq, 0, sizeof(crq));
- crq.version_exchange.first = IBMVNIC_CRQ_CMD;
- crq.version_exchange.cmd = VERSION_EXCHANGE;
- crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
-
- return ibmvnic_send_crq(adapter, &crq);
-}
-
struct vnic_login_client_data {
u8 type;
__be16 len;
@@ -3820,21 +3962,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
vlcd->type = 1;
len = strlen(os_name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(vlcd->name, os_name, len);
+ strscpy(vlcd->name, os_name, len);
vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 2 - LPAR name */
vlcd->type = 2;
len = strlen(utsname()->nodename) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(vlcd->name, utsname()->nodename, len);
+ strscpy(vlcd->name, utsname()->nodename, len);
vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 3 - device name */
vlcd->type = 3;
len = strlen(adapter->netdev->name) + 1;
vlcd->len = cpu_to_be16(len);
- strncpy(vlcd->name, adapter->netdev->name, len);
+ strscpy(vlcd->name, adapter->netdev->name, len);
}
static int send_login(struct ibmvnic_adapter *adapter)
@@ -4301,7 +4443,7 @@ static void handle_vpd_rsp(union ibmvnic_crq *crq,
complete:
if (adapter->fw_version[0] == '\0')
- strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
+ strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
complete(&adapter->fw_done);
}
@@ -4907,7 +5049,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+
+ if (adapter->state == VNIC_DOWN)
+ rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
+ else
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+
if (rc && rc != -EBUSY) {
/* We were unable to schedule the failover
* reset either because the adapter was still
@@ -5330,6 +5477,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ bool init_success;
int rc;
dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -5376,6 +5524,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
+ init_success = false;
do {
rc = init_crq_queue(adapter);
if (rc) {
@@ -5385,10 +5534,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
rc = ibmvnic_reset_init(adapter, false);
- if (rc && rc != EAGAIN)
- goto ibmvnic_init_fail;
} while (rc == EAGAIN);
+ /* We are ignoring the error from ibmvnic_reset_init() assuming that the
+ * partner is not ready. CRQ is not active. When the partner becomes
+ * ready, we will do the passive init reset.
+ */
+
+ if (!rc)
+ init_success = true;
+
rc = init_stats_buffers(adapter);
if (rc)
goto ibmvnic_init_fail;
@@ -5397,10 +5552,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (rc)
goto ibmvnic_stats_fail;
- netdev->mtu = adapter->req_mtu - ETH_HLEN;
- netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
- netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
-
rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc)
goto ibmvnic_dev_file_err;
@@ -5413,7 +5564,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
dev_info(&dev->dev, "ibmvnic registered\n");
- adapter->state = VNIC_PROBED;
+ if (init_success) {
+ adapter->state = VNIC_PROBED;
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
+ } else {
+ adapter->state = VNIC_DOWN;
+ }
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
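The rwi_list walk in ibmvnic_reset() above moves to list_for_each_entry(), which folds the list_entry() conversion into the iterator. A small sketch of the two forms, assuming the same layout as the driver (struct ibmvnic_rwi carries a 'list' member chained into adapter->rwi_list):

    struct ibmvnic_rwi *tmp;
    struct list_head *entry;

    /* old: iterate raw list_head pointers, convert each by hand */
    list_for_each(entry, &adapter->rwi_list) {
            tmp = list_entry(entry, struct ibmvnic_rwi, list);
            if (tmp->reset_reason == reason)
                    break;
    }

    /* new: the iterator yields the containing structure directly */
    list_for_each_entry(tmp, &adapter->rwi_list, list) {
            if (tmp->reset_reason == reason)
                    break;
    }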
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index c1d39a748546..22df602323bc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -851,14 +851,16 @@ enum vnic_state {VNIC_PROBING = 1,
VNIC_CLOSING,
VNIC_CLOSED,
VNIC_REMOVING,
- VNIC_REMOVED};
+ VNIC_REMOVED,
+ VNIC_DOWN};
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
VNIC_RESET_FATAL,
VNIC_RESET_NON_FATAL,
VNIC_RESET_TIMEOUT,
- VNIC_RESET_CHANGE_PARAM};
+ VNIC_RESET_CHANGE_PARAM,
+ VNIC_RESET_PASSIVE_INIT};
struct ibmvnic_rwi {
enum ibmvnic_reset_reason reset_reason;
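The two new enum values work together: probe leaves the adapter in VNIC_DOWN when no partner device responds, and a later CRQ INIT from the partner schedules a VNIC_RESET_PASSIVE_INIT reset that finishes probing. A condensed restatement of the flow from the ibmvnic.c hunks above:

    /* ibmvnic_handle_crq(): partner sends IBMVNIC_CRQ_INIT */
    if (adapter->state == VNIC_DOWN)
            rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
    else
            rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

    /* __ibmvnic_reset() worker: complete probing under rtnl_lock() */
    if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
            rtnl_lock();
            rc = do_passive_init(adapter);  /* VNIC_DOWN -> VNIC_PROBED on success */
            rtnl_unlock();
            if (!rc)
                    netif_carrier_on(adapter->netdev);
    }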