author     Linus Torvalds <torvalds@linux-foundation.org>  2018-04-10 03:04:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-04-10 03:04:10 +0300
commit     c18bb396d3d261ebbb4efbc05129c5d354c541e4 (patch)
tree       058a1413dd34fe4e1d9a998a43d56f3358b93e36 /drivers
parent     fd3b36d275660c905da9900b078eea341847d5e4 (diff)
parent     a2ac99905f1ea8b15997a6ec39af69aa28a3653b (diff)
download   linux-c18bb396d3d261ebbb4efbc05129c5d354c541e4.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The sockmap code has to free socket memory on close if there is corked data, from John Fastabend.

 2) Tunnel names coming from userspace need to be length validated. From Eric Dumazet.

 3) arp_filter() has to take VRFs properly into account, from Miguel Fadon Perlines.

 4) Fix oops in error path of tcf_bpf_init(), from Davide Caratti.

 5) Missing idr_remove() in u32_delete_key(), from Cong Wang.

 6) More syzbot stuff. Several use of uninitialized value fixes all over, from Eric Dumazet.

 7) Do not leak kernel memory to userspace in sctp, also from Eric Dumazet.

 8) Discard frames from unused ports in DSA, from Andrew Lunn.

 9) Fix DMA mapping and reset/failover problems in ibmvnic, from Thomas Falcon.

10) Do not access dp83640 PHY registers prematurely after reset, from Esben Haabendal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
  vhost-net: set packet weight of tx polling to 2 * vq size
  net: thunderx: rework mac addresses list to u64 array
  inetpeer: fix uninit-value in inet_getpeer
  dp83640: Ensure against premature access to PHY registers after reset
  devlink: convert occ_get op to separate registration
  ARM: dts: ls1021a: Specify TBIPA register address
  net/fsl_pq_mdio: Allow explicit speficition of TBIPA address
  ibmvnic: Do not reset CRQ for Mobility driver resets
  ibmvnic: Fix failover case for non-redundant configuration
  ibmvnic: Fix reset scheduler error handling
  ibmvnic: Zero used TX descriptor counter on reset
  ibmvnic: Fix DMA mapping mistakes
  tipc: use the right skb in tipc_sk_fill_sock_diag()
  sctp: sctp_sockaddr_af must check minimal addr length for AF_INET6
  net: dsa: Discard frames from unused ports
  sctp: do not leak kernel memory to user space
  soreuseport: initialise timewait reuseport field
  ipv4: fix uninit-value in ip_route_output_key_hash_rcu()
  dccp: initialize ireq->ir_mark
  net: fix uninit-value in __hw_addr_add_ex()
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h             7
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c     28
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c         50
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c                  146
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h                    1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c           4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c          4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c                  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c       24
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h        1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c  67
-rw-r--r--  drivers/net/hyperv/netvsc.c                          60
-rw-r--r--  drivers/net/netdevsim/devlink.c                      65
-rw-r--r--  drivers/net/phy/dp83640.c                            18
-rw-r--r--  drivers/net/phy/marvell.c                            20
-rw-r--r--  drivers/vhost/net.c                                   8
16 files changed, 310 insertions, 195 deletions
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 5fc46c5a4f36..448d1fafc827 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -265,14 +265,9 @@ struct nicvf_drv_stats {
struct cavium_ptp;
-struct xcast_addr {
- struct list_head list;
- u64 addr;
-};
-
struct xcast_addr_list {
- struct list_head list;
int count;
+ u64 mc[];
};
struct nicvf_work {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 1e9a31fef729..707db3304396 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1929,7 +1929,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
work.work);
struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
union nic_mbx mbx = {};
- struct xcast_addr *xaddr, *next;
+ int idx;
if (!vf_work)
return;
@@ -1956,16 +1956,10 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
/* check if we have any specific MACs to be added to PF DMAC filter */
if (vf_work->mc) {
/* now go through kernel list of MACs and add them one by one */
- list_for_each_entry_safe(xaddr, next,
- &vf_work->mc->list, list) {
+ for (idx = 0; idx < vf_work->mc->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = xaddr->addr;
+ mbx.xcast.data.mac = vf_work->mc->mc[idx];
nicvf_send_msg_to_pf(nic, &mbx);
-
- /* after receiving ACK from PF release memory */
- list_del(&xaddr->list);
- kfree(xaddr);
- vf_work->mc->count--;
}
kfree(vf_work->mc);
}
@@ -1996,17 +1990,15 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
mode |= BGX_XCAST_MCAST_FILTER;
/* here we need to copy mc addrs */
if (netdev_mc_count(netdev)) {
- struct xcast_addr *xaddr;
-
- mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC);
- INIT_LIST_HEAD(&mc_list->list);
+ mc_list = kmalloc(offsetof(typeof(*mc_list),
+ mc[netdev_mc_count(netdev)]),
+ GFP_ATOMIC);
+ if (unlikely(!mc_list))
+ return;
+ mc_list->count = 0;
netdev_hw_addr_list_for_each(ha, &netdev->mc) {
- xaddr = kmalloc(sizeof(*xaddr),
- GFP_ATOMIC);
- xaddr->addr =
+ mc_list->mc[mc_list->count] =
ether_addr_to_u64(ha->addr);
- list_add_tail(&xaddr->list,
- &mc_list->list);
mc_list->count++;
}
}
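
[Editor's note] The thunderx change above replaces the per-address linked list with a single allocation carrying a flexible array of u64 MAC addresses, sized with offsetof() so the header and the addresses live in one block. A minimal userspace sketch of that allocation pattern, with malloc() standing in for kmalloc() and made-up addresses (the offsetof-with-runtime-index form mirrors the driver and relies on the GCC/Clang extension the kernel uses):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

/* same shape as the reworked xcast_addr_list: count + flexible array */
struct addr_list {
	int count;
	uint64_t mc[];
};

int main(void)
{
	int n = 4;
	/* one allocation covers the header plus room for n addresses */
	struct addr_list *list = malloc(offsetof(struct addr_list, mc[n]));

	if (!list)
		return 1;
	list->count = 0;
	for (int i = 0; i < n; i++)
		list->mc[list->count++] = 0x0002c9000000ULL + i; /* made-up MACs */
	for (int i = 0; i < list->count; i++)
		printf("mac[%d] = 0x%012llx\n", i,
		       (unsigned long long)list->mc[i]);
	free(list);
	return 0;
}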
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 80ad16acf0f1..ac2c3f6a12bc 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -377,6 +377,38 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev,
+ uint32_t __iomem * (*get_tbipa)(void __iomem *),
+ void __iomem *reg_map, struct resource *reg_res)
+{
+ struct device_node *np = pdev->dev.of_node;
+ uint32_t __iomem *tbipa;
+ bool tbipa_mapped;
+
+ tbipa = of_iomap(np, 1);
+ if (tbipa) {
+ tbipa_mapped = true;
+ } else {
+ tbipa_mapped = false;
+ tbipa = (*get_tbipa)(reg_map);
+
+ /*
+ * Add consistency check to make sure TBI is contained within
+ * the mapped range (not because we would get a segfault,
+ * rather to catch bugs in computing TBI address). Print error
+ * message but continue anyway.
+ */
+ if ((void *)tbipa > reg_map + resource_size(reg_res) - 4)
+ dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
+ ((void *)tbipa - reg_map) + 4);
+ }
+
+ iowrite32be(be32_to_cpu(tbipa_val), tbipa);
+
+ if (tbipa_mapped)
+ iounmap(tbipa);
+}
+
static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
@@ -450,8 +482,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (tbi) {
const u32 *prop = of_get_property(tbi, "reg", NULL);
- uint32_t __iomem *tbipa;
-
if (!prop) {
dev_err(&pdev->dev,
"missing 'reg' property in node %pOF\n",
@@ -459,20 +489,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
err = -EBUSY;
goto error;
}
-
- tbipa = data->get_tbipa(priv->map);
-
- /*
- * Add consistency check to make sure TBI is contained
- * within the mapped range (not because we would get a
- * segfault, rather to catch bugs in computing TBI
- * address). Print error message but continue anyway.
- */
- if ((void *)tbipa > priv->map + resource_size(&res) - 4)
- dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
- ((void *)tbipa - priv->map) + 4);
-
- iowrite32be(be32_to_cpup(prop), tbipa);
+ set_tbipa(*prop, pdev,
+ data->get_tbipa, priv->map, &res);
}
}
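
[Editor's note] The set_tbipa() helper above keeps the existing consistency check: when the TBIPA location is computed from the MDIO register window rather than mapped from its own 'reg' entry, the 4-byte register must still land inside that window. A standalone sketch of the bounds check, with an ordinary array standing in for the ioremapped region and an illustrative offset:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t regs[0x200];              /* stand-in for the mapped register window */
	uint8_t *reg_map = regs;
	size_t map_size = sizeof(regs);   /* resource_size(reg_res) in the driver */
	uint8_t *tbipa = reg_map + 0x1fc; /* computed TBIPA location (illustrative) */

	/* the 32-bit register must end within the mapped range */
	if ((void *)tbipa > (void *)(reg_map + map_size - 4))
		fprintf(stderr,
			"invalid register map (should be at least 0x%04zx to contain TBI address)\n",
			(size_t)(tbipa - reg_map) + 4);
	else
		printf("TBIPA at offset 0x%04zx fits in the %zu-byte window\n",
		       (size_t)(tbipa - reg_map), map_size);
	return 0;
}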
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b492af6affc3..aad5658d79d5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -118,6 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int init_crq_queue(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -320,18 +321,16 @@ failure:
dev_info(dev, "replenish pools failure\n");
pool->free_map[pool->next_free] = index;
pool->rx_buff[index].skb = NULL;
- if (!dma_mapping_error(dev, dma_addr))
- dma_unmap_single(dev, dma_addr, pool->buff_size,
- DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
atomic_add(buffers_added, &pool->available);
- if (lpar_rc == H_CLOSED) {
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
/* Disable buffer pool replenishment and report carrier off if
- * queue is closed. Firmware guarantees that a signal will
- * be sent to the driver, triggering a reset.
+ * queue is closed or pending failover.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset.
*/
deactivate_rx_pools(adapter);
netif_carrier_off(adapter->netdev);
@@ -1071,6 +1070,14 @@ static int ibmvnic_open(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ /* If device failover is pending, just set device state and return.
+ * Device operation will be handled by reset routine.
+ */
+ if (adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ return 0;
+ }
+
mutex_lock(&adapter->reset_lock);
if (adapter->state != VNIC_CLOSED) {
@@ -1218,7 +1225,6 @@ static int __ibmvnic_close(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
if (rc)
return rc;
- ibmvnic_cleanup(netdev);
adapter->state = VNIC_CLOSED;
return 0;
}
@@ -1228,8 +1234,17 @@ static int ibmvnic_close(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ /* If device failover is pending, just set device state and return.
+ * Device operation will be handled by reset routine.
+ */
+ if (adapter->failover_pending) {
+ adapter->state = VNIC_CLOSED;
+ return 0;
+ }
+
mutex_lock(&adapter->reset_lock);
rc = __ibmvnic_close(netdev);
+ ibmvnic_cleanup(netdev);
mutex_unlock(&adapter->reset_lock);
return rc;
@@ -1562,8 +1577,9 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED) {
- /* Disable TX and report carrier off if queue is closed.
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+ /* Disable TX and report carrier off if queue is closed
+ * or pending failover.
* Firmware guarantees that a signal will be sent to the
* driver, triggering a reset or some other action.
*/
@@ -1711,14 +1727,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- return 0;
- ibmvnic_cleanup(netdev);
- } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
- ibmvnic_cleanup(netdev);
- } else {
+ ibmvnic_cleanup(netdev);
+
+ if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
+ adapter->reset_reason != VNIC_RESET_FAILOVER) {
rc = __ibmvnic_close(netdev);
if (rc)
return rc;
@@ -1737,6 +1749,23 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ if (adapter->wait_for_reset) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ release_sub_crqs(adapter, 1);
+ } else {
+ rc = ibmvnic_reset_crq(adapter);
+ if (!rc)
+ rc = vio_enable_interrupts(adapter->vdev);
+ }
+
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Couldn't initialize crq. rc=%d\n", rc);
+ return rc;
+ }
+
rc = ibmvnic_init(adapter);
if (rc)
return IBMVNIC_INIT_FAILED;
@@ -1878,23 +1907,26 @@ static void __ibmvnic_reset(struct work_struct *work)
mutex_unlock(&adapter->reset_lock);
}
-static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
- enum ibmvnic_reset_reason reason)
+static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ enum ibmvnic_reset_reason reason)
{
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
struct list_head *entry;
+ int ret;
if (adapter->state == VNIC_REMOVING ||
- adapter->state == VNIC_REMOVED) {
- netdev_dbg(netdev, "Adapter removing, skipping reset\n");
- return;
+ adapter->state == VNIC_REMOVED ||
+ adapter->failover_pending) {
+ ret = EBUSY;
+ netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
+ goto err;
}
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = EAGAIN;
- return;
+ ret = adapter->init_done_rc = EAGAIN;
+ goto err;
}
mutex_lock(&adapter->rwi_lock);
@@ -1904,7 +1936,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset\n");
mutex_unlock(&adapter->rwi_lock);
- return;
+ ret = EBUSY;
+ goto err;
}
}
@@ -1912,7 +1945,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (!rwi) {
mutex_unlock(&adapter->rwi_lock);
ibmvnic_close(netdev);
- return;
+ ret = ENOMEM;
+ goto err;
}
rwi->reset_reason = reason;
@@ -1921,6 +1955,12 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
+
+ return 0;
+err:
+ if (adapter->wait_for_reset)
+ adapter->wait_for_reset = false;
+ return -ret;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
@@ -2055,6 +2095,8 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
+ int rc, ret;
+
adapter->fallback.mtu = adapter->req_mtu;
adapter->fallback.rx_queues = adapter->req_rx_queues;
adapter->fallback.tx_queues = adapter->req_tx_queues;
@@ -2062,11 +2104,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
init_completion(&adapter->reset_done);
- ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+ return rc;
wait_for_completion(&adapter->reset_done);
+ ret = 0;
if (adapter->reset_done_rc) {
+ ret = -EIO;
adapter->desired.mtu = adapter->fallback.mtu;
adapter->desired.rx_queues = adapter->fallback.rx_queues;
adapter->desired.tx_queues = adapter->fallback.tx_queues;
@@ -2074,12 +2120,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->desired.tx_entries = adapter->fallback.tx_entries;
init_completion(&adapter->reset_done);
- ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+ return ret;
wait_for_completion(&adapter->reset_done);
}
adapter->wait_for_reset = false;
- return adapter->reset_done_rc;
+ return ret;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
@@ -2364,6 +2413,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ atomic_set(&scrq->used, 0);
scrq->cur = 0;
rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
@@ -2574,7 +2624,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
union sub_crq *next;
int index;
int i, j;
- u8 first;
+ u8 *first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
@@ -2605,11 +2655,12 @@ restart_loop:
txbuff->data_dma[j] = 0;
}
/* if sub_crq was sent indirectly */
- first = txbuff->indir_arr[0].generic.first;
- if (first == IBMVNIC_CRQ_CMD) {
+ first = &txbuff->indir_arr[0].generic.first;
+ if (*first == IBMVNIC_CRQ_CMD) {
dma_unmap_single(dev, txbuff->indir_dma,
sizeof(txbuff->indir_arr),
DMA_TO_DEVICE);
+ *first = 0;
}
if (txbuff->last_frag) {
@@ -3882,9 +3933,9 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+ adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
/* If the number of queues requested can't be allocated by the
* server, the login response will return with code 1. We will need
@@ -4144,7 +4195,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
+ adapter->failover_pending = false;
complete(&adapter->init_done);
+ ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -4161,7 +4214,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ adapter->failover_pending = true;
} else {
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
@@ -4461,19 +4514,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
u64 old_num_rx_queues, old_num_tx_queues;
int rc;
- if (adapter->resetting && !adapter->wait_for_reset) {
- rc = ibmvnic_reset_crq(adapter);
- if (!rc)
- rc = vio_enable_interrupts(adapter->vdev);
- } else {
- rc = init_crq_queue(adapter);
- }
-
- if (rc) {
- dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
adapter->from_passive_init = false;
old_num_rx_queues = adapter->req_rx_queues;
@@ -4498,7 +4538,8 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
return -1;
}
- if (adapter->resetting && !adapter->wait_for_reset) {
+ if (adapter->resetting && !adapter->wait_for_reset &&
+ adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
release_sub_crqs(adapter, 0);
@@ -4586,6 +4627,13 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->mac_change_pending = false;
do {
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
+ rc);
+ goto ibmvnic_init_fail;
+ }
+
rc = ibmvnic_init(adapter);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
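
[Editor's note] The reset-scheduler rework above turns ibmvnic_reset() into a function that reports why a reset was not queued (adapter going away, failover pending, duplicate request, allocation failure) so wait_for_reset() can bail out instead of blocking on a completion that will never fire. A rough userspace sketch of that error-path shape, with an illustrative adapter struct and the actual scheduling step elided:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* illustrative adapter state mirroring the checks in ibmvnic_reset() */
struct adapter {
	bool removing;
	bool failover_pending;
	bool wait_for_reset;
};

/* returns 0 if a reset was scheduled, or a negative errno explaining why not */
static int schedule_reset(struct adapter *a)
{
	int ret;

	if (a->removing || a->failover_pending) {
		ret = EBUSY;		/* nothing will complete; caller must not wait */
		goto err;
	}

	/* ... allocate a work item and queue it here ... */
	return 0;

err:
	if (a->wait_for_reset)
		a->wait_for_reset = false;
	return -ret;
}

int main(void)
{
	struct adapter a = { .failover_pending = true, .wait_for_reset = true };
	int rc = schedule_reset(&a);

	if (rc)		/* the caller skips waiting for the completion */
		printf("reset not scheduled: rc=%d\n", rc);
	return 0;
}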
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 89efe700eafe..99c0b58c2c39 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1108,6 +1108,7 @@ struct ibmvnic_adapter {
bool napi_enabled, from_passive_init;
bool mac_change_pending;
+ bool failover_pending;
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 385f5d425d19..21977ec984c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -468,8 +468,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
- if (!mac_buf)
+ if (!mac_buf) {
+ status = ICE_ERR_NO_MEMORY;
goto err_unroll_fltr_mgmt_struct;
+ }
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
devm_kfree(ice_hw_to_dev(hw), mac_buf);
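
[Editor's note] The ice fix above is a classic error-path bug: jumping to the unwind label on an allocation failure without first setting an error status makes the function report success. A minimal sketch of the corrected pattern (names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int init_hw(void)
{
	int status = 0;
	void *mac_buf;

	mac_buf = malloc(64);
	if (!mac_buf) {
		status = -ENOMEM;	/* the missing assignment: without it the
					 * function would "succeed" with no buffer */
		goto err_unroll;
	}

	/* ... read the MAC address into mac_buf here ... */
	free(mac_buf);
	return 0;

err_unroll:
	/* release anything acquired before the failure */
	return status;
}

int main(void)
{
	printf("init_hw() = %d\n", init_hw());
	return 0;
}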
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 186764a5c263..1db304c01d10 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -156,7 +156,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
- return ARRAY_SIZE(ice_regs_dump_list);
+ return sizeof(ice_regs_dump_list);
}
static void
@@ -170,7 +170,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
regs->version = 1;
- for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list) / sizeof(u32); ++i)
+ for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}
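
[Editor's note] The two ice_ethtool hunks fix a units mix-up: ice_get_regs_len() must return the dump size in bytes, while the copy loop iterates over register entries. A standalone illustration of the distinction, with made-up register offsets and ARRAY_SIZE() defined locally:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint32_t regs_dump_list[] = { 0x0000, 0x0004, 0x0010 };

int main(void)
{
	/* byte length of the register dump: what get_regs_len() should report */
	printf("regs_len = %zu bytes\n", sizeof(regs_dump_list));

	/* number of registers: what the read loop should iterate over */
	for (size_t i = 0; i < ARRAY_SIZE(regs_dump_list); ++i)
		printf("reg[%zu] at offset 0x%04x\n", i, regs_dump_list[i]);
	return 0;
}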
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 7fc1bbf51c44..54a038943c06 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1604,7 +1604,7 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
{
int i;
- if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
memset(pe, 0, sizeof(*pe));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 53fffd09d133..ca38a30fbe91 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3805,18 +3805,6 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
},
};
-static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
-}
-
-static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
- .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
-};
-
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -3877,8 +3865,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
kvd_size, MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params,
- NULL);
+ &kvd_size_params);
if (err)
return err;
@@ -3887,8 +3874,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
linear_size,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD,
- &linear_size_params,
- &mlxsw_sp_resource_kvd_linear_ops);
+ &linear_size_params);
if (err)
return err;
@@ -3905,8 +3891,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
- &hash_double_size_params,
- NULL);
+ &hash_double_size_params);
if (err)
return err;
@@ -3915,8 +3900,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
single_size,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
- &hash_single_size_params,
- NULL);
+ &hash_single_size_params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 82820ba43728..804d4d2c8031 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -442,7 +442,6 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
struct mlxsw_sp_acl_rule_info {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 8796db44dcc3..fe4327f547d2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -315,8 +315,9 @@ static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
return occ;
}
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+static u64 mlxsw_sp_kvdl_occ_get(void *priv)
{
+ const struct mlxsw_sp *mlxsw_sp = priv;
u64 occ = 0;
int i;
@@ -326,48 +327,33 @@ u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
return occ;
}
-static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
return mlxsw_sp_kvdl_part_occ(part);
}
-static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
-static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
- .occ_get = mlxsw_sp_kvdl_single_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
- .occ_get = mlxsw_sp_kvdl_chunks_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
- .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
-};
-
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
@@ -386,8 +372,7 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVDL_SINGLE_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_single_ops);
+ &size_params);
if (err)
return err;
@@ -398,8 +383,7 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVDL_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_chunks_ops);
+ &size_params);
if (err)
return err;
@@ -410,13 +394,13 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_chunks_large_ops);
+ &size_params);
return err;
}
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl *kvdl;
int err;
@@ -429,6 +413,23 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_kvdl_parts_init;
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp_kvdl_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp_kvdl_single_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp_kvdl_chunks_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp_kvdl_large_chunks_occ_get,
+ mlxsw_sp);
+
return 0;
err_kvdl_parts_init:
@@ -438,6 +439,16 @@ err_kvdl_parts_init:
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
kfree(mlxsw_sp->kvdl);
}
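
[Editor's note] The devlink conversion above (and the matching netdevsim change further down) moves occupancy reporting from a devlink_resource_ops table passed at resource registration to a callback registered separately with an opaque priv pointer; the callbacks now have the signature u64 (*occ_get)(void *priv). A userspace sketch of that registration shape, with a stand-in for devlink_resource_occ_get_register() and an invented kvdl_state:

#include <stdio.h>
#include <stdint.h>

/* new-style occupancy callback: opaque private data in, occupancy out */
typedef uint64_t (*occ_get_t)(void *priv);

struct resource_entry {
	const char *name;
	occ_get_t occ_get;	/* NULL until a callback is registered */
	void *occ_priv;
};

/* stand-in for devlink_resource_occ_get_register() */
static void occ_get_register(struct resource_entry *res,
			     occ_get_t cb, void *priv)
{
	res->occ_get = cb;
	res->occ_priv = priv;
}

struct kvdl_state { uint64_t allocated; };	/* invented driver state */

static uint64_t kvdl_occ_get(void *priv)
{
	struct kvdl_state *st = priv;
	return st->allocated;
}

int main(void)
{
	struct resource_entry kvd_linear = { .name = "linear" };
	struct kvdl_state st = { .allocated = 128 };

	occ_get_register(&kvd_linear, kvdl_occ_get, &st);
	if (kvd_linear.occ_get)
		printf("%s occupancy: %llu entries\n", kvd_linear.name,
		       (unsigned long long)kvd_linear.occ_get(kvd_linear.occ_priv));
	return 0;
}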
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index c9910c33e671..04f611e6f678 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -109,11 +109,11 @@ static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
call_rcu(&nvdev->rcu, free_netvsc_device);
}
-static void netvsc_revoke_buf(struct hv_device *device,
- struct netvsc_device *net_device)
+static void netvsc_revoke_recv_buf(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
{
struct nvsp_message *revoke_packet;
- struct net_device *ndev = hv_get_drvdata(device);
int ret;
/*
@@ -157,6 +157,14 @@ static void netvsc_revoke_buf(struct hv_device *device,
}
net_device->recv_section_cnt = 0;
}
+}
+
+static void netvsc_revoke_send_buf(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ struct nvsp_message *revoke_packet;
+ int ret;
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
@@ -202,10 +210,10 @@ static void netvsc_revoke_buf(struct hv_device *device,
}
}
-static void netvsc_teardown_gpadl(struct hv_device *device,
- struct netvsc_device *net_device)
+static void netvsc_teardown_recv_gpadl(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
{
- struct net_device *ndev = hv_get_drvdata(device);
int ret;
if (net_device->recv_buf_gpadl_handle) {
@@ -222,6 +230,13 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
}
net_device->recv_buf_gpadl_handle = 0;
}
+}
+
+static void netvsc_teardown_send_gpadl(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct net_device *ndev)
+{
+ int ret;
if (net_device->send_buf_gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
@@ -437,8 +452,10 @@ static int netvsc_init_buf(struct hv_device *device,
goto exit;
cleanup:
- netvsc_revoke_buf(device, net_device);
- netvsc_teardown_gpadl(device, net_device);
+ netvsc_revoke_recv_buf(device, net_device, ndev);
+ netvsc_revoke_send_buf(device, net_device, ndev);
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
exit:
return ret;
@@ -457,7 +474,6 @@ static int negotiate_nvsp_ver(struct hv_device *device,
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
-
trace_nvsp_send(ndev, init_packet);
/* Send the init request */
@@ -575,7 +591,17 @@ void netvsc_device_remove(struct hv_device *device)
= rtnl_dereference(net_device_ctx->nvdev);
int i;
- netvsc_revoke_buf(device, net_device);
+ /*
+ * Revoke receive buffer. If host is pre-Win2016 then tear down
+ * receive buffer GPADL. Do the same for send buffer.
+ */
+ netvsc_revoke_recv_buf(device, net_device, ndev);
+ if (vmbus_proto_version < VERSION_WIN10)
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+
+ netvsc_revoke_send_buf(device, net_device, ndev);
+ if (vmbus_proto_version < VERSION_WIN10)
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
@@ -589,15 +615,17 @@ void netvsc_device_remove(struct hv_device *device)
*/
netdev_dbg(ndev, "net device safe to remove\n");
- /* older versions require that buffer be revoked before close */
- if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
- netvsc_teardown_gpadl(device, net_device);
-
/* Now, we can close the channel safely */
vmbus_close(device->channel);
- if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
- netvsc_teardown_gpadl(device, net_device);
+ /*
+ * If host is Win2016 or higher then we do the GPADL tear down
+ * here after VMBus is closed.
+ */
+ if (vmbus_proto_version >= VERSION_WIN10) {
+ netvsc_teardown_recv_gpadl(device, net_device, ndev);
+ netvsc_teardown_send_gpadl(device, net_device, ndev);
+ }
/* Release all resources */
free_netvsc_device_rcu(net_device);
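
[Editor's note] The netvsc change splits the combined revoke/teardown helpers into receive and send halves so the teardown can be ordered around vmbus_close() by host version: pre-Windows 2016 hosts need the GPADLs torn down before the channel closes, newer hosts afterwards. A plain-C sketch of that ordering, with printouts standing in for the real VMBus calls:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the real netvsc/VMBus helpers */
static void revoke_recv_buf(void)     { puts("revoke receive buffer"); }
static void revoke_send_buf(void)     { puts("revoke send buffer"); }
static void teardown_recv_gpadl(void) { puts("tear down receive GPADL"); }
static void teardown_send_gpadl(void) { puts("tear down send GPADL"); }
static void close_channel(void)       { puts("close VMBus channel"); }

int main(void)
{
	/* vmbus_proto_version >= VERSION_WIN10 in the driver */
	bool host_is_win2016_or_later = true;

	/* always revoke first; older hosts also need the GPADL torn down
	 * before the channel is closed */
	revoke_recv_buf();
	if (!host_is_win2016_or_later)
		teardown_recv_gpadl();
	revoke_send_buf();
	if (!host_is_win2016_or_later)
		teardown_send_gpadl();

	close_channel();

	/* newer hosts: tear down only after the channel is closed */
	if (host_is_win2016_or_later) {
		teardown_recv_gpadl();
		teardown_send_gpadl();
	}
	return 0;
}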
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
index 1dba47936456..bef7db5d129a 100644
--- a/drivers/net/netdevsim/devlink.c
+++ b/drivers/net/netdevsim/devlink.c
@@ -30,52 +30,36 @@ static struct net *nsim_devlink_net(struct devlink *devlink)
/* IPv4
*/
-static u64 nsim_ipv4_fib_resource_occ_get(struct devlink *devlink)
+static u64 nsim_ipv4_fib_resource_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
}
-static struct devlink_resource_ops nsim_ipv4_fib_res_ops = {
- .occ_get = nsim_ipv4_fib_resource_occ_get,
-};
-
-static u64 nsim_ipv4_fib_rules_res_occ_get(struct devlink *devlink)
+static u64 nsim_ipv4_fib_rules_res_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
}
-static struct devlink_resource_ops nsim_ipv4_fib_rules_res_ops = {
- .occ_get = nsim_ipv4_fib_rules_res_occ_get,
-};
-
/* IPv6
*/
-static u64 nsim_ipv6_fib_resource_occ_get(struct devlink *devlink)
+static u64 nsim_ipv6_fib_resource_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
}
-static struct devlink_resource_ops nsim_ipv6_fib_res_ops = {
- .occ_get = nsim_ipv6_fib_resource_occ_get,
-};
-
-static u64 nsim_ipv6_fib_rules_res_occ_get(struct devlink *devlink)
+static u64 nsim_ipv6_fib_rules_res_occ_get(void *priv)
{
- struct net *net = nsim_devlink_net(devlink);
+ struct net *net = priv;
return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
}
-static struct devlink_resource_ops nsim_ipv6_fib_rules_res_ops = {
- .occ_get = nsim_ipv6_fib_rules_res_occ_get,
-};
-
static int devlink_resources_register(struct devlink *devlink)
{
struct devlink_resource_size_params params = {
@@ -91,7 +75,7 @@ static int devlink_resources_register(struct devlink *devlink)
err = devlink_resource_register(devlink, "IPv4", (u64)-1,
NSIM_RESOURCE_IPV4,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params, NULL);
+ &params);
if (err) {
pr_err("Failed to register IPv4 top resource\n");
goto out;
@@ -100,8 +84,7 @@ static int devlink_resources_register(struct devlink *devlink)
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV4_FIB,
- NSIM_RESOURCE_IPV4,
- &params, &nsim_ipv4_fib_res_ops);
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB resource\n");
return err;
@@ -110,8 +93,7 @@ static int devlink_resources_register(struct devlink *devlink)
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV4,
- &params, &nsim_ipv4_fib_rules_res_ops);
+ NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB rules resource\n");
return err;
@@ -121,7 +103,7 @@ static int devlink_resources_register(struct devlink *devlink)
err = devlink_resource_register(devlink, "IPv6", (u64)-1,
NSIM_RESOURCE_IPV6,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params, NULL);
+ &params);
if (err) {
pr_err("Failed to register IPv6 top resource\n");
goto out;
@@ -130,8 +112,7 @@ static int devlink_resources_register(struct devlink *devlink)
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
err = devlink_resource_register(devlink, "fib", n,
NSIM_RESOURCE_IPV6_FIB,
- NSIM_RESOURCE_IPV6,
- &params, &nsim_ipv6_fib_res_ops);
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB resource\n");
return err;
@@ -140,12 +121,28 @@ static int devlink_resources_register(struct devlink *devlink)
n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
err = devlink_resource_register(devlink, "fib-rules", n,
NSIM_RESOURCE_IPV6_FIB_RULES,
- NSIM_RESOURCE_IPV6,
- &params, &nsim_ipv6_fib_rules_res_ops);
+ NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB rules resource\n");
return err;
}
+
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_ipv4_fib_resource_occ_get,
+ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_ipv4_fib_rules_res_occ_get,
+ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_ipv6_fib_resource_occ_get,
+ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_ipv6_fib_rules_res_occ_get,
+ net);
out:
return err;
}
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 654f42d00092..a6c87793d899 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev)
kfree(dp83640);
}
+static int dp83640_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* From DP83640 datasheet: "Software driver code must wait 3 us
+ * following a software reset before allowing further serial MII
+ * operations with the DP83640."
+ */
+ udelay(10); /* Taking udelay inaccuracy into account */
+
+ return 0;
+}
+
static int dp83640_config_init(struct phy_device *phydev)
{
struct dp83640_private *dp83640 = phydev->priv;
@@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = {
.flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
+ .soft_reset = dp83640_soft_reset,
.config_init = dp83640_config_init,
.ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a75c511950c3..c22e8e383247 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -828,6 +828,22 @@ static int m88e1121_config_init(struct phy_device *phydev)
return marvell_config_init(phydev);
}
+static int m88e1318_config_init(struct phy_device *phydev)
+{
+ if (phy_interrupt_is_valid(phydev)) {
+ int err = phy_modify_paged(
+ phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1318S_PHY_LED_TCR,
+ MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+ MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+ MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
+ if (err < 0)
+ return err;
+ }
+
+ return m88e1121_config_init(phydev);
+}
+
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
@@ -870,7 +886,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
phydev->advertising &= ~pause;
}
- return m88e1121_config_init(phydev);
+ return m88e1318_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -2086,7 +2102,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &m88e1121_config_init,
+ .config_init = &m88e1318_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
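
[Editor's note] phy_modify_paged(), used by the new m88e1318_config_init() above, performs a read-modify-write on a paged register: the bits in the mask argument are cleared, then the set argument is ORed in. The core update reduced to plain C, with illustrative bit values rather than the real MII_88E1318S definitions:

#include <stdio.h>
#include <stdint.h>

/* clear the bits in 'mask', then set the bits in 'set' */
static uint16_t modify_bits(uint16_t old, uint16_t mask, uint16_t set)
{
	return (uint16_t)((old & ~mask) | set);
}

int main(void)
{
	uint16_t tcr = 0x8081;		  /* illustrative current LED/TCR value */
	uint16_t force_int = 0x0080;	  /* placeholder for ..._TCR_FORCE_INT */
	uint16_t intn_enable = 0x0040;	  /* placeholder for ..._TCR_INTn_ENABLE */
	uint16_t int_active_low = 0x0800; /* placeholder for ..._TCR_INT_ACTIVE_LOW */

	tcr = modify_bits(tcr, force_int, intn_enable | int_active_low);
	printf("new TCR = 0x%04x\n", tcr);	/* 0x8081 -> 0x8841 */
	return 0;
}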
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index edc6fec9ad84..986058a57917 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -44,6 +44,10 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving rx. */
+#define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2)
+
/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
@@ -473,6 +477,7 @@ static void handle_tx(struct vhost_net *net)
struct socket *sock;
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy, zcopy_used;
+ int sent_pkts = 0;
mutex_lock(&vq->mutex);
sock = vq->private_data;
@@ -580,7 +585,8 @@ static void handle_tx(struct vhost_net *net)
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
+ unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) {
vhost_poll_queue(&vq->poll);
break;
}
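
[Editor's note] The vhost-net patch adds a per-pass packet budget (twice the virtqueue size) alongside the existing byte budget, so a flood of small packets cannot keep handle_tx() from yielding to other virtqueues. A minimal sketch of the dual-budget loop shape, with a synthetic packet source and constants mirroring the driver:

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

#define NET_WEIGHT	0x80000		/* byte budget per polling pass */
#define PKT_WEIGHT(num)	((num) * 2)	/* packet budget: 2 * virtqueue size */

/* synthetic packet source: endless stream of 64-byte packets */
static bool next_packet(size_t *len)
{
	*len = 64;
	return true;
}

int main(void)
{
	int vq_num = 256;		/* virtqueue size */
	size_t total_len = 0, len;
	int sent_pkts = 0;

	while (next_packet(&len)) {
		total_len += len;
		sent_pkts++;
		/* requeue the work once either budget is exhausted */
		if (total_len >= NET_WEIGHT || sent_pkts >= PKT_WEIGHT(vq_num)) {
			printf("requeue after %d packets, %zu bytes\n",
			       sent_pkts, total_len);
			break;
		}
	}
	return 0;
}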