author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-17 17:23:42 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-17 17:23:42 +0300
commit     ed56954cf5a8b7abb530676a073d14f9de661d69 (patch)
tree       4b7dcd9a516a941ef72564387548714e577b45aa /drivers/infiniband/sw/rxe
parent     6830d50325ee27fbf0150f77cbec1ed304a5b8f6 (diff)
parent     5244ca88671a1981ceec09c5c8809f003e6a62aa (diff)
download   linux-ed56954cf5a8b7abb530676a073d14f9de661d69.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "Fix two build warnings on 32-bit platforms

  It seems the linux-next CI and 0-day bot are not testing enough 32-bit
  configurations; as soon as you merged the rdma pull request there were
  two instant reports of warnings on these systems that I would have
  thought should have been covered by time in linux-next.

  Anyhow, here are the fixes so people don't hit problems with -Werror."

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/siw: Fix pointer cast warning
  RDMA/rxe: Fix compile warnings on 32-bit
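The rxe change in this pull is a mechanical refactor, visible in the diff below: the old atomic_write_reply() declared locals (src, dst, mr, payload) that were only ever used inside an #ifdef CONFIG_64BIT block, so a 32-bit build still had to compile declarations it could never use, a likely source of the reported warnings. The fix hoists the guard to whole-function granularity and supplies a trivial stub for !CONFIG_64BIT. A minimal sketch of that pattern follows; the helper name do_wide_store() is invented for illustration and is not part of the patch.

/* Sketch only: CONFIG_64BIT, u64, smp_store_release() and EOPNOTSUPP are
 * real kernel symbols, but do_wide_store() is a hypothetical helper.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/barrier.h>

#ifdef CONFIG_64BIT
/* Real body: built only where an 8-byte store is a single native access. */
static int do_wide_store(u64 *dst, u64 val)
{
	smp_store_release(dst, val);	/* release-ordered 64-bit store */
	return 0;
}
#else
/* 32-bit stub: the 64-bit-only body above is never compiled here. */
static int do_wide_store(u64 *dst, u64 val)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_64BIT */

Because the preprocessor drops the guarded body entirely on 32-bit configurations, nothing 64-bit-only is left in the translation unit for -Werror to trip over, which is what the rxe diff below achieves with do_atomic_write().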
Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 72
1 file changed, 40 insertions(+), 32 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 7a60c7709da0..c74972244f08 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -785,53 +785,61 @@ out:
 	return ret;
 }
 
-static enum resp_states atomic_write_reply(struct rxe_qp *qp,
-					struct rxe_pkt_info *pkt)
+#ifdef CONFIG_64BIT
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
 {
-	u64 src, *dst;
-	struct resp_res *res = qp->resp.res;
 	struct rxe_mr *mr = qp->resp.mr;
 	int payload = payload_size(pkt);
+	u64 src, *dst;
 
-	if (!res) {
-		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
-		qp->resp.res = res;
-	}
-
-	if (!res->replay) {
-#ifdef CONFIG_64BIT
-		if (mr->state != RXE_MR_STATE_VALID)
-			return RESPST_ERR_RKEY_VIOLATION;
-
-		memcpy(&src, payload_addr(pkt), payload);
+	if (mr->state != RXE_MR_STATE_VALID)
+		return RESPST_ERR_RKEY_VIOLATION;
 
-		dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
-		/* check vaddr is 8 bytes aligned. */
-		if (!dst || (uintptr_t)dst & 7)
-			return RESPST_ERR_MISALIGNED_ATOMIC;
+	memcpy(&src, payload_addr(pkt), payload);
 
-		/* Do atomic write after all prior operations have completed */
-		smp_store_release(dst, src);
+	dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
+	/* check vaddr is 8 bytes aligned. */
+	if (!dst || (uintptr_t)dst & 7)
+		return RESPST_ERR_MISALIGNED_ATOMIC;
 
-		/* decrease resp.resid to zero */
-		qp->resp.resid -= sizeof(payload);
+	/* Do atomic write after all prior operations have completed */
+	smp_store_release(dst, src);
 
-		qp->resp.msn++;
+	/* decrease resp.resid to zero */
+	qp->resp.resid -= sizeof(payload);
 
-		/* next expected psn, read handles this separately */
-		qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
-		qp->resp.ack_psn = qp->resp.psn;
+	qp->resp.msn++;
 
-		qp->resp.opcode = pkt->opcode;
-		qp->resp.status = IB_WC_SUCCESS;
+	/* next expected psn, read handles this separately */
+	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+	qp->resp.ack_psn = qp->resp.psn;
 
-		return RESPST_ACKNOWLEDGE;
+	qp->resp.opcode = pkt->opcode;
+	qp->resp.status = IB_WC_SUCCESS;
+	return RESPST_ACKNOWLEDGE;
+}
 #else
-		return RESPST_ERR_UNSUPPORTED_OPCODE;
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
+{
+	return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
 #endif /* CONFIG_64BIT */
+
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
+{
+	struct resp_res *res = qp->resp.res;
+
+	if (!res) {
+		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+		qp->resp.res = res;
 	}
 
-	return RESPST_ACKNOWLEDGE;
+	if (res->replay)
+		return RESPST_ACKNOWLEDGE;
+	return do_atomic_write(qp, pkt);
 }
 
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
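A side note on the alignment test carried over into do_atomic_write() above: the /* check vaddr is 8 bytes aligned. */ check rejects any destination whose low three address bits are non-zero, since an 8-byte store is only a single untorn access when the target is naturally aligned. The stand-alone sketch below shows the same bit test in plain user-space C; it is written for illustration and is not taken from the kernel tree.

#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when p is naturally aligned for an 8-byte access,
 * i.e. the low three bits of the address are all zero. */
static bool aligned_for_u64(const void *p)
{
	return ((uintptr_t)p & 7) == 0;
}

int main(void)
{
	alignas(8) unsigned char buf[16];

	printf("%d\n", aligned_for_u64(buf));		/* 1: explicitly 8-byte aligned */
	printf("%d\n", aligned_for_u64(buf + 1));	/* 0: shifted by one byte */
	return 0;
}

In the driver, a failed check maps to RESPST_ERR_MISALIGNED_ATOMIC rather than a boolean, but the underlying test is the same.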