Author:    David S. Miller <davem@davemloft.net>  2018-06-30 14:42:26 +0300
Committer: David S. Miller <davem@davemloft.net>  2018-06-30 14:42:26 +0300
Commit:    b5f16484dd1c99b63eb986121c2b0fc624182d52 (patch)
Tree:      75c7a211188000f16cd750058b57c5405e722aad /net/smc/smc_tx.c
Parent:    b0402f0113675ad78bc10c839f93a25348dd1f73 (diff)
Parent:    684b89bc39ce4f204b1a2b180f39f2eb36a6b695 (diff)
Merge branch 'smc-pnetid-and-SMC-D-support'
Ursula Braun says:

====================
smc: pnetid and SMC-D support

SMC requires a configured pnet table to map Ethernet interfaces to RoCE adapter ports. For s390 there exists hardware support to group such devices. The first three patches cover the s390 pnetid support, enabling SMC-R usage on s390 without configuring an extra pnet table.

SMC currently requires RoCE adapters, and uses RDMA-techniques implemented with IB-verbs. But s390 offers another method for intra-CEC Shared Memory communication. The following seven patches implement a solution to run SMC traffic based on intra-CEC DMA, called SMC-D.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/smc/smc_tx.c')
-rw-r--r--  net/smc/smc_tx.c  205
1 file changed, 160 insertions(+), 45 deletions(-)
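
Note on the change below: the transmit path in smc_tx.c is split by link-group type. smc_tx_sndbuf_nonempty() and smc_tx_rdma_writes() now dispatch on conn->lgr->is_smcd, choosing either the SMC-R helpers (RDMA write over the RoCE link via IB verbs) or the new SMC-D helpers (ISM write, i.e. intra-CEC DMA). The following is a minimal, self-contained sketch of that dispatch pattern only; the struct layouts and function names here are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures (not the real layouts). */
struct link_group {
	bool is_smcd;                 /* models smc_link_group::is_smcd */
};

struct connection {
	struct link_group *lgr;       /* models smc_connection::lgr */
};

/* SMC-R path: would post an RDMA write over the RoCE link (IB verbs). */
static int smcr_sndbuf_nonempty(struct connection *conn)
{
	(void)conn;
	printf("SMC-R: RDMA write via IB verbs\n");
	return 0;
}

/* SMC-D path: would write into the peer's buffer via the ISM device
 * (intra-CEC DMA), as smcd_tx_ism_write() does in the diff below.
 */
static int smcd_sndbuf_nonempty(struct connection *conn)
{
	(void)conn;
	printf("SMC-D: ISM write (intra-CEC DMA)\n");
	return 0;
}

/* Common entry point, mirroring the dispatch added to smc_tx_sndbuf_nonempty(). */
static int tx_sndbuf_nonempty(struct connection *conn)
{
	return conn->lgr->is_smcd ? smcd_sndbuf_nonempty(conn)
				  : smcr_sndbuf_nonempty(conn);
}

int main(void)
{
	struct link_group lgr = { .is_smcd = true };
	struct connection conn = { .lgr = &lgr };

	return tx_sndbuf_nonempty(&conn);
}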
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index cee666400752..142bcb134dd6 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -24,6 +24,7 @@
#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
+#include "smc_ism.h"
#include "smc_tx.h"
#define SMC_TX_WORK_DELAY HZ
@@ -250,6 +251,24 @@ out_err:
/***************************** sndbuf consumer *******************************/
+/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
+int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
+ u32 offset, int signal)
+{
+ struct smc_ism_position pos;
+ int rc;
+
+ memset(&pos, 0, sizeof(pos));
+ pos.token = conn->peer_token;
+ pos.index = conn->peer_rmbe_idx;
+ pos.offset = conn->tx_off + offset;
+ pos.signal = signal;
+ rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
+ if (rc)
+ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+ return rc;
+}
+
/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
int num_sges, struct ib_sge sges[])
@@ -297,21 +316,104 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
+/* SMC-R helper for smc_tx_rdma_writes() */
+static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
+ size_t src_off, size_t src_len,
+ size_t dst_off, size_t dst_len)
+{
+ dma_addr_t dma_addr =
+ sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
+ struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+ int src_len_sum = src_len, dst_len_sum = dst_len;
+ struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
+ int sent_count = src_off;
+ int srcchunk, dstchunk;
+ int num_sges;
+ int rc;
+
+ for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+ num_sges = 0;
+ for (srcchunk = 0; srcchunk < 2; srcchunk++) {
+ sges[srcchunk].addr = dma_addr + src_off;
+ sges[srcchunk].length = src_len;
+ sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
+ num_sges++;
+
+ src_off += src_len;
+ if (src_off >= conn->sndbuf_desc->len)
+ src_off -= conn->sndbuf_desc->len;
+ /* modulo in send ring */
+ if (src_len_sum == dst_len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ src_len = dst_len - src_len; /* remainder */
+ src_len_sum += src_len;
+ }
+ rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
+ if (rc)
+ return rc;
+ if (dst_len_sum == len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ dst_off = 0; /* modulo offset in RMBE ring buffer */
+ dst_len = len - dst_len; /* remainder */
+ dst_len_sum += dst_len;
+ src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
+ sent_count);
+ src_len_sum = src_len;
+ }
+ return 0;
+}
+
+/* SMC-D helper for smc_tx_rdma_writes() */
+static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
+ size_t src_off, size_t src_len,
+ size_t dst_off, size_t dst_len)
+{
+ int src_len_sum = src_len, dst_len_sum = dst_len;
+ int srcchunk, dstchunk;
+ int rc;
+
+ for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+ for (srcchunk = 0; srcchunk < 2; srcchunk++) {
+ void *data = conn->sndbuf_desc->cpu_addr + src_off;
+
+ rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
+ sizeof(struct smcd_cdc_msg), 0);
+ if (rc)
+ return rc;
+ dst_off += src_len;
+ src_off += src_len;
+ if (src_off >= conn->sndbuf_desc->len)
+ src_off -= conn->sndbuf_desc->len;
+ /* modulo in send ring */
+ if (src_len_sum == dst_len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ src_len = dst_len - src_len; /* remainder */
+ src_len_sum += src_len;
+ }
+ if (dst_len_sum == len)
+ break; /* either on 1st or 2nd iteration */
+ /* prepare next (== 2nd) iteration */
+ dst_off = 0; /* modulo offset in RMBE ring buffer */
+ dst_len = len - dst_len; /* remainder */
+ dst_len_sum += dst_len;
+ src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
+ src_len_sum = src_len;
+ }
+ return 0;
+}
+
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
* usable snd_wnd as max transmit
*/
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
- size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
- size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
+ size_t len, src_len, dst_off, dst_len; /* current chunk values */
union smc_host_cursor sent, prep, prod, cons;
- struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
- struct smc_link_group *lgr = conn->lgr;
struct smc_cdc_producer_flags *pflags;
int to_send, rmbespace;
- struct smc_link *link;
- dma_addr_t dma_addr;
- int num_sges;
int rc;
/* source: sndbuf */
@@ -341,7 +443,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
len = min(to_send, rmbespace);
/* initialize variables for first iteration of subsequent nested loop */
- link = &lgr->lnk[SMC_SINGLE_LINK];
dst_off = prod.count;
if (prod.wrap == cons.wrap) {
/* the filled destination area is unwrapped,
@@ -358,8 +459,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
*/
dst_len = len;
}
- dst_len_sum = dst_len;
- src_off = sent.count;
/* dst_len determines the maximum src_len */
if (sent.count + dst_len <= conn->sndbuf_desc->len) {
/* unwrapped src case: single chunk of entire dst_len */
@@ -368,38 +467,15 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
src_len = conn->sndbuf_desc->len - sent.count;
}
- src_len_sum = src_len;
- dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
- for (dstchunk = 0; dstchunk < 2; dstchunk++) {
- num_sges = 0;
- for (srcchunk = 0; srcchunk < 2; srcchunk++) {
- sges[srcchunk].addr = dma_addr + src_off;
- sges[srcchunk].length = src_len;
- sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
- num_sges++;
- src_off += src_len;
- if (src_off >= conn->sndbuf_desc->len)
- src_off -= conn->sndbuf_desc->len;
- /* modulo in send ring */
- if (src_len_sum == dst_len)
- break; /* either on 1st or 2nd iteration */
- /* prepare next (== 2nd) iteration */
- src_len = dst_len - src_len; /* remainder */
- src_len_sum += src_len;
- }
- rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
- if (rc)
- return rc;
- if (dst_len_sum == len)
- break; /* either on 1st or 2nd iteration */
- /* prepare next (== 2nd) iteration */
- dst_off = 0; /* modulo offset in RMBE ring buffer */
- dst_len = len - dst_len; /* remainder */
- dst_len_sum += dst_len;
- src_len = min_t(int,
- dst_len, conn->sndbuf_desc->len - sent.count);
- src_len_sum = src_len;
- }
+
+ if (conn->lgr->is_smcd)
+ rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
+ dst_off, dst_len);
+ else
+ rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
+ dst_off, dst_len);
+ if (rc)
+ return rc;
if (conn->urg_tx_pend && len == to_send)
pflags->urg_data_present = 1;
@@ -420,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
/* Wakeup sndbuf consumers from any context (IRQ or process)
* since there is more data to transmit; usable snd_wnd as max transmit
*/
-int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
struct smc_cdc_producer_flags *pflags;
struct smc_cdc_tx_pend *pend;
@@ -467,6 +543,37 @@ out_unlock:
return rc;
}
+static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+ struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
+ int rc = 0;
+
+ spin_lock_bh(&conn->send_lock);
+ if (!pflags->urg_data_present)
+ rc = smc_tx_rdma_writes(conn);
+ if (!rc)
+ rc = smcd_cdc_msg_send(conn);
+
+ if (!rc && pflags->urg_data_present) {
+ pflags->urg_data_pending = 0;
+ pflags->urg_data_present = 0;
+ }
+ spin_unlock_bh(&conn->send_lock);
+ return rc;
+}
+
+int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+ int rc;
+
+ if (conn->lgr->is_smcd)
+ rc = smcd_tx_sndbuf_nonempty(conn);
+ else
+ rc = smcr_tx_sndbuf_nonempty(conn);
+
+ return rc;
+}
+
/* Wakeup sndbuf consumers from process context
* since there is more data to transmit
*/
@@ -495,7 +602,8 @@ out:
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
- union smc_host_cursor cfed, cons;
+ union smc_host_cursor cfed, cons, prod;
+ int sender_free = conn->rmb_desc->len;
int to_confirm;
smc_curs_write(&cons,
@@ -505,11 +613,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
smc_curs_read(&conn->rx_curs_confirmed, conn),
conn);
to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
+ if (to_confirm > conn->rmbe_update_limit) {
+ smc_curs_write(&prod,
+ smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+ conn);
+ sender_free = conn->rmb_desc->len -
+ smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+ }
if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
force ||
((to_confirm > conn->rmbe_update_limit) &&
- ((to_confirm > (conn->rmb_desc->len / 2)) ||
+ ((sender_free <= (conn->rmb_desc->len / 2)) ||
conn->local_rx_ctrl.prod_flags.write_blocked))) {
if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
conn->alert_token_local) { /* connection healthy */