From a3fe3d01bd0d7cd6ee7a5e3eebc0926c47954fe7 Mon Sep 17 00:00:00 2001
From: Ursula Braun
Date: Fri, 28 Jul 2017 13:56:15 +0200
Subject: net/smc: introduce sg-logic for RMBs

The follow-on patch makes use of ib_map_mr_sg() when introducing
separate memory regions for RMBs. This function is based on
scatterlists; thus this patch introduces scatterlists for RMBs.

Signed-off-by: Ursula Braun
Signed-off-by: David S. Miller
---
 net/smc/smc_ib.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)
(limited to 'net/smc/smc_ib.c')

diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index b31715505a35..fcfeb89b05d9 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -283,6 +283,37 @@ void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size,
 	buf_slot->dma_addr[SMC_SINGLE_LINK] = 0;
 }
 
+/* Map a new TX or RX buffer SG-table to DMA */
+int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
+		      struct smc_buf_desc *buf_slot,
+		      enum dma_data_direction data_direction)
+{
+	int mapped_nents;
+
+	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
+				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
+				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
+				     data_direction);
+	if (!mapped_nents)
+		return -ENOMEM;
+
+	return mapped_nents;
+}
+
+void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
+			 struct smc_buf_desc *buf_slot,
+			 enum dma_data_direction data_direction)
+{
+	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
+		return; /* already unmapped */
+
+	ib_dma_unmap_sg(smcibdev->ibdev,
+			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
+			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
+			data_direction);
+	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
+}
+
 static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
 {
 	struct net_device *ndev;
--
cgit v1.2.3
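The two helpers above expect the caller to have filled a per-buffer
struct sg_table before mapping; the buffer-creation patches later in
this series do exactly that. A minimal sketch of the calling sequence,
assuming a physically contiguous buffer already allocated at
desc->cpu_addr (the helper name smc_example_map_buf is illustrative,
not part of the patches):

	/* Sketch only: build a single-entry SG list for one contiguous
	 * buffer and hand it to smc_ib_buf_map_sg(); unwinding trimmed.
	 */
	static int smc_example_map_buf(struct smc_ib_device *smcibdev,
				       struct smc_buf_desc *desc, int bufsize)
	{
		int rc;

		/* one contiguous region -> a single-entry scatterlist */
		rc = sg_alloc_table(&desc->sgt[SMC_SINGLE_LINK], 1, GFP_KERNEL);
		if (rc)
			return rc;
		sg_set_buf(desc->sgt[SMC_SINGLE_LINK].sgl, desc->cpu_addr,
			   bufsize);

		/* returns the number of mapped entries (here 1) or -ENOMEM */
		rc = smc_ib_buf_map_sg(smcibdev, desc, DMA_FROM_DEVICE);
		return rc < 0 ? rc : 0;
	}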
From 897e1c245773d93f26f125a99674f585a3aeef5d Mon Sep 17 00:00:00 2001
From: Ursula Braun
Date: Fri, 28 Jul 2017 13:56:16 +0200
Subject: net/smc: use separate memory regions for RMBs

SMC currently uses the unsafe_global_rkey of the protection domain,
which exposes all memory for remote reads and writes once a connection
is established. This patch introduces separate memory regions with
separate rkeys for every RMB. Now the unsafe_global_rkey of the
protection domain is no longer needed.

Signed-off-by: Ursula Braun
Signed-off-by: David S. Miller
---
 net/smc/smc_clc.c  |  4 ++--
 net/smc/smc_core.c | 18 ++++++++++++++++++
 net/smc/smc_core.h |  6 +++++-
 net/smc/smc_ib.c   | 45 +++++++++++++++++++++++++++++++++++++++++++--
 net/smc/smc_ib.h   |  5 +++--
 5 files changed, 71 insertions(+), 7 deletions(-)
(limited to 'net/smc/smc_ib.c')

diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 15cb76019009..3934913ab835 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -204,7 +204,7 @@ int smc_clc_send_confirm(struct smc_sock *smc)
 	memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
 	hton24(cclc.qpn, link->roce_qp->qp_num);
 	cclc.rmb_rkey =
-		htonl(link->roce_pd->unsafe_global_rkey);
+		htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
 	cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */
 	cclc.rmbe_alert_token = htonl(conn->alert_token_local);
 	cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
@@ -256,7 +256,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
 	memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
 	hton24(aclc.qpn, link->roce_qp->qp_num);
 	aclc.rmb_rkey =
-		htonl(link->roce_pd->unsafe_global_rkey);
+		htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
 	aclc.conn_idx = 1;			/* as long as 1 RMB = 1 RMBE */
 	aclc.rmbe_alert_token = htonl(conn->alert_token_local);
 	aclc.qp_mtu = link->path_mtu;

diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index bfdbda795f67..f1dd4e1cd3e1 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -218,6 +218,7 @@ static void smc_sndbuf_unuse(struct smc_connection *conn)
 static void smc_rmb_unuse(struct smc_connection *conn)
 {
 	if (conn->rmb_desc) {
+		conn->rmb_desc->reused = true;
 		conn->rmb_desc->used = 0;
 		conn->rmbe_size = 0;
 	}
@@ -274,6 +275,8 @@ static void smc_lgr_free_rmbs(struct smc_link_group *lgr)
 		list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i],
 					 list) {
 			list_del(&rmb_desc->list);
+			smc_ib_put_memory_region(
+				rmb_desc->mr_rx[SMC_SINGLE_LINK]);
 			smc_ib_buf_unmap_sg(lnk->smcibdev, rmb_desc,
 					    DMA_FROM_DEVICE);
 			kfree(rmb_desc->cpu_addr);
@@ -627,6 +630,21 @@ int smc_rmb_create(struct smc_sock *smc)
 			rmb_desc = NULL;
 			continue; /* if mapping failed, try smaller one */
 		}
+		rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd,
+					      IB_ACCESS_REMOTE_WRITE |
+					      IB_ACCESS_LOCAL_WRITE,
+					      rmb_desc);
+		if (rc) {
+			smc_ib_buf_unmap_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+					    rmb_desc, DMA_FROM_DEVICE);
+			sg_free_table(&rmb_desc->sgt[SMC_SINGLE_LINK]);
+			free_pages((unsigned long)rmb_desc->cpu_addr,
+				   rmb_desc->order);
+			kfree(rmb_desc);
+			rmb_desc = NULL;
+			continue;
+		}
+
 		rmb_desc->used = 1;
 		write_lock_bh(&lgr->rmbs_lock);
 		list_add(&rmb_desc->list, &lgr->rmbs[bufsize_short]);

diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 0ee450d69907..17b5fea09901 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -94,8 +94,13 @@ struct smc_buf_desc {
 						/* mapped address of buffer */
 	void			*cpu_addr;	/* virtual address of buffer */
 	struct sg_table		sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */
+	struct ib_mr		*mr_rx[SMC_LINKS_PER_LGR_MAX];
+						/* for rmb only: memory region
+						 * incl. rkey provided to peer
+						 */
 	u32			order;		/* allocation order */
 	u32			used;		/* currently used / unused */
+	bool			reused;		/* new created / reused */
 };
 
 struct smc_rtoken {				/* address/key of remote RMB */
@@ -175,5 +180,4 @@ int smc_sndbuf_create(struct smc_sock *smc);
 int smc_rmb_create(struct smc_sock *smc);
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
 			    struct smc_clc_msg_accept_confirm *clc);
-
 #endif

diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index fcfeb89b05d9..08233492ec45 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -192,8 +192,7 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
 {
 	int rc;
 
-	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev,
-				   IB_PD_UNSAFE_GLOBAL_RKEY);
+	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
 	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
 	if (IS_ERR(lnk->roce_pd))
 		lnk->roce_pd = NULL;
@@ -254,6 +253,48 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
 	return rc;
 }
 
+void smc_ib_put_memory_region(struct ib_mr *mr)
+{
+	ib_dereg_mr(mr);
+}
+
+static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
+{
+	unsigned int offset = 0;
+	int sg_num;
+
+	/* map the largest prefix of a dma mapped SG list */
+	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
+			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
+			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
+			      &offset, PAGE_SIZE);
+
+	return sg_num;
+}
+
+/* Allocate a memory region and map the dma mapped SG list of buf_slot */
+int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
+			     struct smc_buf_desc *buf_slot)
+{
+	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
+		return 0; /* already done */
+
+	buf_slot->mr_rx[SMC_SINGLE_LINK] =
+		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
+	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
+		int rc;
+
+		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
+		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
+		return rc;
+	}
+
+	if (smc_ib_map_mr_sg(buf_slot) != 1)
+		return -EINVAL;
+
+	return 0;
+}
+
 /* map a new TX or RX buffer to DMA */
 int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
 		   struct smc_buf_desc *buf_slot,

diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index b30e387854b6..b57d29f29042 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -71,6 +71,7 @@ int smc_ib_ready_link(struct smc_link *lnk);
 int smc_ib_modify_qp_rts(struct smc_link *lnk);
 int smc_ib_modify_qp_reset(struct smc_link *lnk);
 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
-
-
+int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
+			     struct smc_buf_desc *buf_slot);
+void smc_ib_put_memory_region(struct ib_mr *mr);
 #endif
--
cgit v1.2.3
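A note on the MR sizing in smc_ib_get_memory_region() above: max_num_sg
is 1 << buf_slot->order, i.e. the page count of the buffer, so the MR
could describe the RMB page-by-page even though the current SG table
holds a single contiguous entry. A worked example with assumed values
(4 KiB pages, 64 KiB RMB; both illustrative):

	/* Assumed values, for illustration only:
	 *   bufsize         = 64 KiB, PAGE_SIZE = 4 KiB
	 *   buf_slot->order = get_order(bufsize)   = 4
	 *   max_num_sg      = 1 << buf_slot->order = 16 pages
	 * ib_map_mr_sg() walks the DMA-mapped SG list in PAGE_SIZE chunks
	 * and returns the number of SG entries it consumed; with one
	 * contiguous entry that must be 1, which is why
	 * smc_ib_get_memory_region() treats any other result as -EINVAL.
	 */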
From 652a1e41eca7dfaacc47a79badb4a51aea570d35 Mon Sep 17 00:00:00 2001
From: Ursula Braun
Date: Fri, 28 Jul 2017 13:56:17 +0200
Subject: net/smc: register RMB-related memory region

A memory region created for a new RMB must be registered explicitly
before the peer can make use of it for remote DMA transfer.

Signed-off-by: Ursula Braun
Signed-off-by: David S. Miller
---
 net/smc/af_smc.c   | 38 ++++++++++++++++++++++++++++++++
 net/smc/smc_core.c |  1 -
 net/smc/smc_core.h | 12 +++++++++++
 net/smc/smc_ib.c   |  2 +-
 net/smc/smc_wr.c   | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 net/smc/smc_wr.h   |  1 +
 6 files changed, 115 insertions(+), 2 deletions(-)
(limited to 'net/smc/smc_ib.c')

diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 120a7b9b4d8e..e0a95d50bf87 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -338,6 +338,12 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid)
 		return SMC_CLC_DECL_INTERR;
 
 	smc_wr_remember_qp_attr(link);
+
+	rc = smc_wr_reg_send(link,
+			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
+	if (rc)
+		return SMC_CLC_DECL_INTERR;
+
 	/* send CONFIRM LINK response over RoCE fabric */
 	rc = smc_llc_send_confirm_link(link,
 				       link->smcibdev->mac[link->ibport - 1],
@@ -459,6 +465,18 @@ static int smc_connect_rdma(struct smc_sock *smc)
 			reason_code = SMC_CLC_DECL_INTERR;
 			goto decline_rdma_unlock;
 		}
+	} else {
+		struct smc_buf_desc *buf_desc = smc->conn.rmb_desc;
+
+		if (!buf_desc->reused) {
+			/* register memory region for new rmb */
+			rc = smc_wr_reg_send(link,
+					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
+			if (rc) {
+				reason_code = SMC_CLC_DECL_INTERR;
+				goto decline_rdma_unlock;
+			}
+		}
 	}
 
 	rc = smc_clc_send_confirm(smc);
@@ -692,6 +710,12 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
 	int rc;
 
 	link = &lgr->lnk[SMC_SINGLE_LINK];
+
+	rc = smc_wr_reg_send(link,
+			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
+	if (rc)
+		return SMC_CLC_DECL_INTERR;
+
 	/* send CONFIRM LINK request to client over the RoCE fabric */
 	rc = smc_llc_send_confirm_link(link,
 				       link->smcibdev->mac[link->ibport - 1],
@@ -803,6 +827,20 @@ static void smc_listen_work(struct work_struct *work)
 	smc_close_init(new_smc);
 	smc_rx_init(new_smc);
 
+	if (local_contact != SMC_FIRST_CONTACT) {
+		struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc;
+
+		if (!buf_desc->reused) {
+			/* register memory region for new rmb */
+			rc = smc_wr_reg_send(link,
+					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
+			if (rc) {
+				reason_code = SMC_CLC_DECL_INTERR;
+				goto decline_rdma;
+			}
+		}
+	}
+
 	rc = smc_clc_send_accept(new_smc, local_contact);
 	if (rc)
 		goto out_err;

diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f1dd4e1cd3e1..87bb3e4771a8 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -175,7 +175,6 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
 	rc = smc_wr_alloc_link_mem(lnk);
 	if (rc)
 		goto free_lgr;
-	init_waitqueue_head(&lnk->wr_tx_wait);
 	rc = smc_ib_create_protection_domain(lnk);
 	if (rc)
 		goto free_link_mem;

diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 17b5fea09901..f7b40bdbf24c 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -37,6 +37,14 @@ struct smc_wr_buf {
 	u8	raw[SMC_WR_BUF_SIZE];
 };
 
+#define SMC_WR_REG_MR_WAIT_TIME	(5 * HZ)/* wait time for ib_wr_reg_mr result */
+
+enum smc_wr_reg_state {
+	POSTED,		/* ib_wr_reg_mr request posted */
+	CONFIRMED,	/* ib_wr_reg_mr response: successful */
+	FAILED		/* ib_wr_reg_mr response: failure */
+};
+
 struct smc_link {
 	struct smc_ib_device	*smcibdev;	/* ib-device */
 	u8			ibport;		/* port - values 1 | 2 */
@@ -65,6 +73,10 @@ struct smc_link {
 	u64			wr_rx_id;	/* seq # of last recv WR */
 	u32			wr_rx_cnt;	/* number of WR recv buffers */
 
+	struct ib_reg_wr	wr_reg;		/* WR register memory region */
+	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */
+	enum smc_wr_reg_state	wr_reg_state;	/* state of wr_reg request */
+
 	union ib_gid		gid;		/* gid matching used vlan id */
 	u32			peer_qpn;	/* QP number of peer */
 	enum ib_mtu		path_mtu;	/* used mtu */

diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 08233492ec45..85e1831f591e 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -231,10 +231,10 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
 		.recv_cq = lnk->smcibdev->roce_cq_recv,
 		.srq = NULL,
 		.cap = {
-			.max_send_wr = SMC_WR_BUF_CNT,
 				/* include unsolicited rdma_writes as well,
 				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
 				 */
+			.max_send_wr = SMC_WR_BUF_CNT * 3,
 			.max_recv_wr = SMC_WR_BUF_CNT * 3,
 			.max_send_sge = SMC_IB_MAX_SEND_SGE,
 			.max_recv_sge = 1,

diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 874ee9f9d796..ab56bda66783 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -68,6 +68,16 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
 	int i;
 
 	link = wc->qp->qp_context;
+
+	if (wc->opcode == IB_WC_REG_MR) {
+		if (wc->status)
+			link->wr_reg_state = FAILED;
+		else
+			link->wr_reg_state = CONFIRMED;
+		wake_up(&link->wr_reg_wait);
+		return;
+	}
+
 	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
 	if (pnd_snd_idx == link->wr_tx_cnt)
 		return;
@@ -243,6 +253,52 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
 	return rc;
 }
 
+/* Register a memory region and wait for result. */
+int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
+{
+	struct ib_send_wr *failed_wr = NULL;
+	int rc;
+
+	ib_req_notify_cq(link->smcibdev->roce_cq_send,
+			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+	link->wr_reg_state = POSTED;
+	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
+	link->wr_reg.mr = mr;
+	link->wr_reg.key = mr->rkey;
+	failed_wr = &link->wr_reg.wr;
+	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, &failed_wr);
+	WARN_ON(failed_wr != &link->wr_reg.wr);
+	if (rc)
+		return rc;
+
+	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
+					      (link->wr_reg_state != POSTED),
+					      SMC_WR_REG_MR_WAIT_TIME);
+	if (!rc) {
+		/* timeout - terminate connections */
+		struct smc_link_group *lgr;
+
+		lgr = container_of(link, struct smc_link_group,
+				   lnk[SMC_SINGLE_LINK]);
+		smc_lgr_terminate(lgr);
+		return -EPIPE;
+	}
+	if (rc == -ERESTARTSYS)
+		return -EINTR;
+	switch (link->wr_reg_state) {
+	case CONFIRMED:
+		rc = 0;
+		break;
+	case FAILED:
+		rc = -EIO;
+		break;
+	case POSTED:
+		rc = -EPIPE;
+		break;
+	}
+	return rc;
+}
+
 void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type,
 			     smc_wr_tx_filter filter,
 			     smc_wr_tx_dismisser dismisser,
@@ -458,6 +514,11 @@ static void smc_wr_init_sge(struct smc_link *lnk)
 		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
 		lnk->wr_rx_ibs[i].num_sge = 1;
 	}
+	lnk->wr_reg.wr.next = NULL;
+	lnk->wr_reg.wr.num_sge = 0;
+	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
+	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
+	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 }
 
 void smc_wr_free_link(struct smc_link *lnk)
@@ -602,6 +663,8 @@ int smc_wr_create_link(struct smc_link *lnk)
 	smc_wr_init_sge(lnk);
 	memset(lnk->wr_tx_mask, 0,
 	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
+	init_waitqueue_head(&lnk->wr_tx_wait);
+	init_waitqueue_head(&lnk->wr_reg_wait);
 	return rc;
 
 dma_unmap:

diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 0b9beeda6053..45eb53833052 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -102,5 +102,6 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
 int smc_wr_rx_post_init(struct smc_link *link);
 void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
+int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr);
 
 #endif /* SMC_WR_H */
--
cgit v1.2.3
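Taken together, the registration call sites added above follow one
pattern on both the client and the server side; a condensed sketch,
pieced together from the hunks in this patch rather than a separate
function in it:

	/* Register the MR of a freshly created RMB with the peer before
	 * sending the CLC confirm/accept; a reused RMB keeps its earlier
	 * registration, so it is skipped here.
	 */
	if (!buf_desc->reused) {
		rc = smc_wr_reg_send(link, buf_desc->mr_rx[SMC_SINGLE_LINK]);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR; /* internal error */
			goto decline_rdma;
		}
	}

smc_wr_reg_send() itself posts an IB_WR_REG_MR work request with
wr_id set to the MR pointer and then sleeps on wr_reg_wait until the
completion handler flips wr_reg_state, or until SMC_WR_REG_MR_WAIT_TIME
expires, in which case the whole link group is terminated.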
From 9d8fb6173477ad61364eeab652a87c2a295fa601 Mon Sep 17 00:00:00 2001
From: Ursula Braun
Date: Fri, 28 Jul 2017 13:56:19 +0200
Subject: net/smc: introduce sg-logic for send buffers

SMC send buffers are processed the same way as RMBs. Since RMBs have
been converted to sg-logic, do the same for send buffers.

Signed-off-by: Ursula Braun
Signed-off-by: David S. Miller
---
 net/smc/smc_core.c | 46 +++++++++++++++++++++++++++++++++++-----------
 net/smc/smc_core.h |  2 --
 net/smc/smc_ib.c   | 29 -----------------------------
 net/smc/smc_ib.h   |  6 ------
 net/smc/smc_tx.c   |  6 +++---
 5 files changed, 38 insertions(+), 51 deletions(-)
(limited to 'net/smc/smc_ib.c')

diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 87bb3e4771a8..8795c7ed9ce4 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -248,6 +248,7 @@ static void smc_link_clear(struct smc_link *lnk)
 
 static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
 {
+	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
 	struct smc_buf_desc *sndbuf_desc, *bf_desc;
 	int i;
 
@@ -255,10 +256,11 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
 		list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i],
 					 list) {
 			list_del(&sndbuf_desc->list);
-			smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-					 smc_uncompress_bufsize(i),
-					 sndbuf_desc, DMA_TO_DEVICE);
-			kfree(sndbuf_desc->cpu_addr);
+			smc_ib_buf_unmap_sg(lnk->smcibdev, sndbuf_desc,
+					    DMA_TO_DEVICE);
+			sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]);
+			free_pages((unsigned long)sndbuf_desc->cpu_addr,
+				   sndbuf_desc->order);
 			kfree(sndbuf_desc);
 		}
 	}
@@ -517,6 +519,9 @@ int smc_sndbuf_create(struct smc_sock *smc)
 	for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
 	     bufsize_short >= 0; bufsize_short--) {
 		bufsize = smc_uncompress_bufsize(bufsize_short);
+		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
+			continue;
+
 		/* check for reusable sndbuf_slot in the link group */
 		sndbuf_desc = smc_sndbuf_get_slot(lgr, bufsize_short);
 		if (sndbuf_desc) {
@@ -527,10 +532,12 @@ int smc_sndbuf_create(struct smc_sock *smc)
 		sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL);
 		if (!sndbuf_desc)
 			break; /* give up with -ENOMEM */
-		sndbuf_desc->cpu_addr = kzalloc(bufsize,
-						GFP_KERNEL | __GFP_NOWARN |
-						__GFP_NOMEMALLOC |
-						__GFP_NORETRY);
+
+		sndbuf_desc->cpu_addr =
+			(void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
+						 __GFP_NOMEMALLOC |
+						 __GFP_NORETRY | __GFP_ZERO,
+						 get_order(bufsize));
 		if (!sndbuf_desc->cpu_addr) {
 			kfree(sndbuf_desc);
 			sndbuf_desc = NULL;
@@ -539,14 +546,31 @@ int smc_sndbuf_create(struct smc_sock *smc)
 			 */
 			continue;
 		}
-		rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
-				    bufsize, sndbuf_desc, DMA_TO_DEVICE);
+		sndbuf_desc->order = get_order(bufsize);
+
+		rc = sg_alloc_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK], 1,
+				    GFP_KERNEL);
 		if (rc) {
-			kfree(sndbuf_desc->cpu_addr);
+			free_pages((unsigned long)sndbuf_desc->cpu_addr,
+				   sndbuf_desc->order);
+			kfree(sndbuf_desc);
+			sndbuf_desc = NULL;
+			continue;
+		}
+		sg_set_buf(sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl,
+			   sndbuf_desc->cpu_addr, bufsize);
+
+		rc = smc_ib_buf_map_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+				       sndbuf_desc, DMA_TO_DEVICE);
+		if (rc != 1) {
+			sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]);
+			free_pages((unsigned long)sndbuf_desc->cpu_addr,
+				   sndbuf_desc->order);
 			kfree(sndbuf_desc);
 			sndbuf_desc = NULL;
 			continue; /* if mapping failed, try smaller one */
 		}
+
 		sndbuf_desc->used = 1;
 		write_lock_bh(&lgr->sndbufs_lock);
 		list_add(&sndbuf_desc->list, &lgr->sndbufs[bufsize_short]);

diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index f7b40bdbf24c..72c25cb3eb89 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -102,8 +102,6 @@ struct smc_link {
 /* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
 struct smc_buf_desc {
 	struct list_head	list;
-	u64			dma_addr[SMC_LINKS_PER_LGR_MAX];
-						/* mapped address of buffer */
 	void			*cpu_addr;	/* virtual address of buffer */
 	struct sg_table		sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */
 	struct ib_mr		*mr_rx[SMC_LINKS_PER_LGR_MAX];

diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 85e1831f591e..021f061609f5 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -295,35 +295,6 @@ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
 	return 0;
 }
 
-/* map a new TX or RX buffer to DMA */
-int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
-		   struct smc_buf_desc *buf_slot,
-		   enum dma_data_direction data_direction)
-{
-	int rc = 0;
-
-	if (buf_slot->dma_addr[SMC_SINGLE_LINK])
-		return rc; /* already mapped */
-	buf_slot->dma_addr[SMC_SINGLE_LINK] =
-		ib_dma_map_single(smcibdev->ibdev, buf_slot->cpu_addr,
-				  buf_size, data_direction);
-	if (ib_dma_mapping_error(smcibdev->ibdev,
-				 buf_slot->dma_addr[SMC_SINGLE_LINK]))
-		rc = -EIO;
-	return rc;
-}
-
-void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size,
-		      struct smc_buf_desc *buf_slot,
-		      enum dma_data_direction data_direction)
-{
-	if (!buf_slot->dma_addr[SMC_SINGLE_LINK])
-		return; /* already unmapped */
-	ib_dma_unmap_single(smcibdev->ibdev, *buf_slot->dma_addr, buf_size,
-			    data_direction);
-	buf_slot->dma_addr[SMC_SINGLE_LINK] = 0;
-}
-
 /* Map a new TX or RX buffer SG-table to DMA */
 int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
 		      struct smc_buf_desc *buf_slot,

diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index b57d29f29042..72acb19ffc67 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -51,12 +51,6 @@ int smc_ib_register_client(void) __init;
 void smc_ib_unregister_client(void);
 bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
 int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
-int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
-		   struct smc_buf_desc *buf_slot,
-		   enum dma_data_direction data_direction);
-void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize,
-		      struct smc_buf_desc *buf_slot,
-		      enum dma_data_direction data_direction);
 int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
 		      struct smc_buf_desc *buf_slot,
 		      enum dma_data_direction data_direction);

diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 21ec1832ab51..f4d58e2dd559 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -277,6 +277,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 	struct smc_link_group *lgr = conn->lgr;
 	int to_send, rmbespace;
 	struct smc_link *link;
+	dma_addr_t dma_addr;
 	int num_sges;
 	int rc;
 
@@ -334,12 +335,11 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 			src_len = conn->sndbuf_size - sent.count;
 		}
 		src_len_sum = src_len;
+		dma_addr =
+			sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
 		for (dstchunk = 0; dstchunk < 2; dstchunk++) {
 			num_sges = 0;
 			for (srcchunk = 0; srcchunk < 2; srcchunk++) {
-				sges[srcchunk].addr =
-					conn->sndbuf_desc->dma_addr[SMC_SINGLE_LINK] +
-					src_off;
+				sges[srcchunk].addr = dma_addr + src_off;
 				sges[srcchunk].length = src_len;
 				sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
 				num_sges++;
--
cgit v1.2.3
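The new (1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC check caps the
candidate buffer sizes at what a single scatterlist page can describe.
A worked example with assumed values (4 KiB pages and a 32-byte
struct scatterlist; both illustrative, the real values depend on the
architecture and config):

	/* SG_MAX_SINGLE_ALLOC = PAGE_SIZE / sizeof(struct scatterlist)
	 *                     = 4096 / 32 = 128 entries  (assumed values)
	 * A 1 MiB sndbuf spans 1 << get_order(SZ_1M) = 256 pages > 128,
	 * so smc_sndbuf_create() skips it and retries with 512 KiB, the
	 * next smaller candidate in the loop.
	 */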
From 10428dd8354cc1c74ee806df45c2227c1f9d7b0c Mon Sep 17 00:00:00 2001
From: Ursula Braun
Date: Fri, 28 Jul 2017 13:56:22 +0200
Subject: net/smc: synchronize buffer usage with device

Usage of send buffer "sndbuf" is synced
(a) before filling sndbuf for cpu access
(b) after filling sndbuf for device access

Usage of receive buffer "RMB" is synced
(a) before reading RMB content for cpu access
(b) after reading RMB content for device access

Signed-off-by: Ursula Braun
Signed-off-by: David S. Miller
---
 net/smc/af_smc.c   |  2 ++
 net/smc/smc_core.c | 32 ++++++++++++++++++++++++++++++++
 net/smc/smc_core.h |  4 ++++
 net/smc/smc_ib.c   | 41 +++++++++++++++++++++++++++++++++++++++++
 net/smc/smc_ib.h   |  6 ++++++
 net/smc/smc_rx.c   |  3 +++
 net/smc/smc_tx.c   |  3 +++
 7 files changed, 91 insertions(+)
(limited to 'net/smc/smc_ib.c')

diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 75518879b68a..8c6d24b2995d 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -474,6 +474,7 @@ static int smc_connect_rdma(struct smc_sock *smc)
 			}
 		}
 	}
+	smc_rmb_sync_sg_for_device(&smc->conn);
 
 	rc = smc_clc_send_confirm(smc);
 	if (rc)
@@ -832,6 +833,7 @@ static void smc_listen_work(struct work_struct *work)
 			}
 		}
 	}
+	smc_rmb_sync_sg_for_device(&new_smc->conn);
 
 	rc = smc_clc_send_accept(new_smc, local_contact);
 	if (rc)

diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 447bd52da0e2..1a16d51e2330 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -615,6 +615,38 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 	return 0;
 }
 
+void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
+{
+	struct smc_link_group *lgr = conn->lgr;
+
+	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+			       conn->sndbuf_desc, DMA_TO_DEVICE);
+}
+
+void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
+{
+	struct smc_link_group *lgr = conn->lgr;
+
+	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+				  conn->sndbuf_desc, DMA_TO_DEVICE);
+}
+
+void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
+{
+	struct smc_link_group *lgr = conn->lgr;
+
+	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+			       conn->rmb_desc, DMA_FROM_DEVICE);
+}
+
+void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
+{
+	struct smc_link_group *lgr = conn->lgr;
+
+	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+				  conn->rmb_desc, DMA_FROM_DEVICE);
+}
+
 /* create the send and receive buffer for an SMC socket;
  * receive buffers are called RMBs;
  * (even though the SMC protocol allows more than one RMB-element per RMB,

diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 1d713e8e067c..19c44bf4e391 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -189,4 +189,8 @@ void smc_lgr_terminate(struct smc_link_group *lgr);
 int smc_buf_create(struct smc_sock *smc);
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
 			    struct smc_clc_msg_accept_confirm *clc);
+void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
+void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
+void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
+void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
 #endif

diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 021f061609f5..547e0e113b17 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -13,6 +13,7 @@
 
 #include <linux/random.h>
 #include <linux/workqueue.h>
+#include <linux/scatterlist.h>
 #include <rdma/ib_verbs.h>
 
 #include "smc_pnet.h"
@@ -295,6 +296,46 @@ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
 	return 0;
 }
 
+/* synchronize buffer usage for cpu access */
+void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
+			    struct smc_buf_desc *buf_slot,
+			    enum dma_data_direction data_direction)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+
+	/* for now there is just one DMA address */
+	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
+		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
+		if (!sg_dma_len(sg))
+			break;
+		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
+					   sg_dma_address(sg),
+					   sg_dma_len(sg),
+					   data_direction);
+	}
+}
+
+/* synchronize buffer usage for device access */
+void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
+			       struct smc_buf_desc *buf_slot,
+			       enum dma_data_direction data_direction)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+
+	/* for now there is just one DMA address */
+	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
+		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
+		if (!sg_dma_len(sg))
+			break;
+		ib_dma_sync_single_for_device(smcibdev->ibdev,
+					      sg_dma_address(sg),
+					      sg_dma_len(sg),
+					      data_direction);
+	}
+}
+
 /* Map a new TX or RX buffer SG-table to DMA */
 int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
 		      struct smc_buf_desc *buf_slot,

diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 72acb19ffc67..9b927a33d5e6 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -68,4 +68,10 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
 int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
 			     struct smc_buf_desc *buf_slot);
 void smc_ib_put_memory_region(struct ib_mr *mr);
+void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
+			    struct smc_buf_desc *buf_slot,
+			    enum dma_data_direction data_direction);
+void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
+			       struct smc_buf_desc *buf_slot,
+			       enum dma_data_direction data_direction);
 #endif

diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index f0c8b089f770..b17a333e9bb0 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -170,6 +170,7 @@ copy:
 				  copylen, conn->rmbe_size - cons.count);
 		chunk_len_sum = chunk_len;
 		chunk_off = cons.count;
+		smc_rmb_sync_sg_for_cpu(conn);
 		for (chunk = 0; chunk < 2; chunk++) {
 			if (!(flags & MSG_TRUNC)) {
 				rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off,
@@ -177,6 +178,7 @@ copy:
 				if (rc) {
 					if (!read_done)
 						read_done = -EFAULT;
+					smc_rmb_sync_sg_for_device(conn);
 					goto out;
 				}
 			}
@@ -190,6 +192,7 @@ copy:
 			chunk_len_sum += chunk_len;
 			chunk_off = 0; /* modulo offset in recv ring buffer */
 		}
+		smc_rmb_sync_sg_for_device(conn);
 
 		/* update cursors */
 		if (!(flags & MSG_PEEK)) {

diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f4d58e2dd559..3c656beb8820 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -174,10 +174,12 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
 				  copylen, conn->sndbuf_size - tx_cnt_prep);
 		chunk_len_sum = chunk_len;
 		chunk_off = tx_cnt_prep;
+		smc_sndbuf_sync_sg_for_cpu(conn);
 		for (chunk = 0; chunk < 2; chunk++) {
 			rc = memcpy_from_msg(sndbuf_base + chunk_off, msg,
 					     chunk_len);
 			if (rc) {
+				smc_sndbuf_sync_sg_for_device(conn);
 				if (send_done)
 					return send_done;
 				goto out_err;
@@ -192,6 +194,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
 			chunk_len_sum += chunk_len;
 			chunk_off = 0; /* modulo offset in send ring buffer */
 		}
+		smc_sndbuf_sync_sg_for_device(conn);
 		/* update cursors */
 		smc_curs_add(conn->sndbuf_size, &prep, copylen);
 		smc_curs_write(&conn->tx_curs_prep,
--
cgit v1.2.3
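The net effect of this last patch is a strict bracket around every copy
into or out of the ring buffers. On fully DMA-coherent platforms the
ib_dma_sync_single_for_{cpu,device} calls typically reduce to no-ops,
but the pairing is what the DMA API contract requires in general;
schematically, for the receive side (names from the patch, loop
structure elided):

	smc_rmb_sync_sg_for_cpu(conn);	/* make device writes visible */
	rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off, chunk_len);
	smc_rmb_sync_sg_for_device(conn); /* hand the RMB back to the device */

The send side mirrors this with smc_sndbuf_sync_sg_for_cpu() before
memcpy_from_msg() and smc_sndbuf_sync_sg_for_device() afterwards,
including on the error path out of the copy loop.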