author		Mintz, Yuval <Yuval.Mintz@cavium.com>	2017-02-27 12:06:32 +0300
committer	David S. Miller <davem@davemloft.net>	2017-02-27 17:22:09 +0300
commit		fd3c615ae13a853ab26211e14f70b416cc8f8134 (patch)
tree		cf7f5adebaf75054e3aec992947b64bec1a8ddcc /drivers/net/ethernet/qlogic/qed/qed_sriov.h
parent		4ca257eed6adf58d325c39c320a06dbcd34c43db (diff)
download	linux-fd3c615ae13a853ab26211e14f70b416cc8f8134.tar.xz
qed: Fix race with multiple VFs
A PF synchronizes all IOV activity relating to its VFs by using a single workqueue handling the work. The workqueue would read a bitmask of pending VF events and act upon each in turn.

Problem is that the indication of a VF message [which sets the 'vf event' bit for that VF] arrives and is set in the slowpath attention context, which isn't synchronized with the processing of the events. When multiple VFs are present, it's possible that the PF would lose the indication of one of the VFs' pending events, leading that VF to later time out.

Instead of adding locks/barriers, simply move from a bitmask into a per-VF indication inside that VF's entry in the PF database.

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
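The race described here is a classic lost update: the attention context does a plain read-modify-write on a shared bitmask while the workqueue snapshots and clears that same mask with no ordering between the two. The following userspace model is a minimal sketch of that pattern, not the driver code itself; the function names vf_event_isr() and iov_workqueue_fn() and the array size are invented for illustration, and only the pending_events / array-of-u64 shape mirrors the field removed by this patch.

/* Userspace model of the lost-update race; illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VF_ARRAY_LENGTH 4	/* placeholder for QED_VF_ARRAY_LENGTH */

static uint64_t pending_events[VF_ARRAY_LENGTH];

/* Stand-in for the attention context: mark one VF as having a pending message. */
static void vf_event_isr(int vfid)
{
	pending_events[vfid / 64] |= 1ULL << (vfid % 64);	/* plain RMW, no lock */
}

/* Stand-in for the workqueue: snapshot all pending bits, then clear the shared mask. */
static void iov_workqueue_fn(uint64_t *events)
{
	memcpy(events, pending_events, sizeof(pending_events));
	/* RACE WINDOW: a bit set by vf_event_isr() right here is wiped by the
	 * memset below without ever being observed, so that VF's request is
	 * dropped and the VF eventually times out.
	 */
	memset(pending_events, 0, sizeof(pending_events));
}

int main(void)
{
	uint64_t snapshot[VF_ARRAY_LENGTH];

	vf_event_isr(3);
	iov_workqueue_fn(snapshot);
	printf("VF3 pending in snapshot: %d\n", (int)((snapshot[0] >> 3) & 1));
	return 0;
}

Without a lock or an atomic exchange protecting the mask there is no safe way to both read and clear it concurrently with the setter, which is why the patch abandons the shared mask rather than adding barriers.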
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_sriov.h')
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_sriov.h	4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index fc08cc2da6a7..a89605821522 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -140,6 +140,9 @@ struct qed_iov_vf_mbx {
 	/* Address in VF where a pending message is located */
 	dma_addr_t pending_req;
 
+	/* Message from VF awaits handling */
+	bool b_pending_msg;
+
 	u8 *offset;
 
 	/* saved VF request header */
@@ -232,7 +235,6 @@ struct qed_vf_info {
  */
 struct qed_pf_iov {
 	struct qed_vf_info vfs_array[MAX_NUM_VFS];
-	u64 pending_events[QED_VF_ARRAY_LENGTH];
 	u64 pending_flr[QED_VF_ARRAY_LENGTH];
 
 	/* Allocate message address continuosuly and split to each VF */
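With the shared mask gone, the pending indication lives inside each VF's own entry (the new b_pending_msg field added above), so the attention context and the workqueue no longer contend on a word shared across VFs. Below is a rough userspace sketch of the replacement scheme; the MAX_NUM_VFS value and the process_vf_mailbox() handler are placeholders, and only vfs_array and b_pending_msg mirror names from the hunks above.

/* Sketch of the per-VF indication that replaces the shared bitmask. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NUM_VFS 8		/* placeholder value for the sketch */

struct vf_entry {
	bool b_pending_msg;	/* mirrors qed_iov_vf_mbx.b_pending_msg */
	/* ... remainder of the per-VF state ... */
};

static struct vf_entry vfs_array[MAX_NUM_VFS];

/* Stand-in for the real mailbox handler. */
static void process_vf_mailbox(int vfid)
{
	printf("servicing VF %d\n", vfid);
}

/* Attention context: touch only the entry of the VF that signalled. */
static void vf_event_isr(int vfid)
{
	vfs_array[vfid].b_pending_msg = true;
}

/* Workqueue: scan the per-VF entries; each flag is cleared only when that
 * particular VF is serviced, so an indication set concurrently for another
 * VF can no longer be erased as a side effect of clearing a shared mask.
 */
static void iov_workqueue_fn(void)
{
	for (int i = 0; i < MAX_NUM_VFS; i++) {
		if (!vfs_array[i].b_pending_msg)
			continue;
		vfs_array[i].b_pending_msg = false;
		process_vf_mailbox(i);
	}
}

int main(void)
{
	vf_event_isr(2);
	vf_event_isr(5);
	iov_workqueue_fn();
	return 0;
}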