author		Sathya Perla <sathyap@serverengines.com>	2010-06-29 04:11:17 +0400
committer	David S. Miller <davem@davemloft.net>	2010-07-01 00:26:42 +0400
commit		f3eb62d2cc7da7bea4b394dd06f6bc738aa284e7
tree		2e98c0b346690eeca0ea6cad6f8f21a9e16af476 /drivers/net/benet
parent		7e307c7ad5340b226966da6e564ec7f717da3adb
be2net: memory barrier fixes on IBM p7 platform
The IBM p7 architecture seems to reorder memory accesses more aggressively than previous ppc64 architectures. This requires memory barriers to ensure that the rx/tx doorbells are rung only after the memory to be DMAed has been written.

Signed-off-by: Sathya Perla <sathyap@serverengines.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
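The write-side pattern being enforced is the usual DMA-descriptor/doorbell sequence. A minimal sketch of the idea, using hypothetical helpers (example_post_and_notify(), fill_descriptor()) rather than the driver's exact code, which is in the hunks below:

/*
 * Sketch of the producer-side fix. The helper names are hypothetical;
 * the real driver changes are in be_rxq_notify/be_txq_notify/be_mcc_notify.
 */
static void example_post_and_notify(struct be_adapter *adapter, void *desc, u32 db_val)
{
	/* 1. The CPU fills the descriptor in DMA-able memory. */
	fill_descriptor(desc);			/* hypothetical */

	/*
	 * 2. wmb() orders the descriptor stores before the doorbell store.
	 * Without it, p7 may let the MMIO write overtake the memory writes,
	 * and the NIC could DMA a stale descriptor.
	 */
	wmb();

	/* 3. Only now ring the doorbell with an MMIO write. */
	iowrite32(db_val, adapter->db + DB_RQ_OFFSET);
}

Note that the full wmb() is used rather than smp_wmb(): the ordering needed is between normal memory stores and an MMIO store observed by the device, which must hold even on a uniprocessor kernel.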
Diffstat (limited to 'drivers/net/benet')
-rw-r--r--	drivers/net/benet/be_cmds.c	2
-rw-r--r--	drivers/net/benet/be_main.c	7
2 files changed, 9 insertions, 0 deletions
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index ee1ad9693c8f..65e3260d0f08 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -25,6 +25,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
 }
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 01eb447f98b6..b63687956f2b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -89,6 +89,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 	u32 val = 0;
 	val |= qid & DB_RQ_RING_ID_MASK;
 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
 }
@@ -97,6 +99,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 	u32 val = 0;
 	val |= qid & DB_TXULP_RING_ID_MASK;
 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
 }
@@ -973,6 +977,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
 	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
 		return NULL;
+	rmb();
 	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
 	queue_tail_inc(&adapter->rx_obj.cq);
@@ -1066,6 +1071,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
 		return NULL;
+	rmb();
 	be_dws_le_to_cpu(txcp, sizeof(*txcp));
 	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
@@ -1113,6 +1119,7 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
 	if (!eqe->evt)
 		return NULL;
+	rmb();
 	eqe->evt = le32_to_cpu(eqe->evt);
 	queue_tail_inc(&eq_obj->q);
 	return eqe;
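The three rmb() additions above are the read-side counterpart: the NIC DMA-writes a completion (or event) entry and sets its valid/evt flag last, so the CPU must not load the rest of the entry before it has loaded that flag. A minimal sketch of the polling pattern, mirroring be_rx_compl_get with a hypothetical helper name:

/* Sketch of the consumer-side fix; the helper name is hypothetical. */
static struct be_eth_rx_compl *example_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp = queue_tail_node(cq);

	/* The NIC writes the completion body first and 'valid' last. */
	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	/*
	 * rmb() keeps the loads of the completion body from being
	 * reordered (or speculated) before the load of 'valid';
	 * otherwise a half-written completion could be consumed.
	 */
	rmb();

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
	queue_tail_inc(cq);
	return rxcp;
}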