Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/ipoib/ipoib.h     |  4
 drivers/infiniband/ulp/iser/iscsi_iser.h |  4
 drivers/infiniband/ulp/rtrs/rtrs-clt.c   | 92
 drivers/infiniband/ulp/rtrs/rtrs-clt.h   |  3
 drivers/infiniband/ulp/rtrs/rtrs-pri.h   |  2
 drivers/infiniband/ulp/rtrs/rtrs-srv.c   | 51
 drivers/infiniband/ulp/rtrs/rtrs-srv.h   |  2
 drivers/infiniband/ulp/srpt/ib_srpt.c    | 80
 8 files changed, 179 insertions(+), 59 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 963e936da5e3..abe0522b7df4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -509,12 +509,10 @@ struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *format);
int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
struct net_device *dev);
-void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_ib_tx_timeout_work(struct work_struct *work);
-void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open_default(struct net_device *dev);
@@ -533,7 +531,6 @@ void ipoib_mcast_restart_task(struct work_struct *work);
void ipoib_mcast_start_thread(struct net_device *dev);
void ipoib_mcast_stop_thread(struct net_device *dev);
-void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
@@ -610,7 +607,6 @@ int ipoib_set_mode(struct net_device *dev, const char *buf);
void ipoib_setup_common(struct net_device *dev);
-void ipoib_pkey_open(struct ipoib_dev_priv *priv);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 68429a5f796d..1d7ac24c4c00 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -507,10 +507,6 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *iser_conn);
-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- enum iser_data_dir cmd_dir);
-
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir dir,
bool all_imm);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 88106cf5ce55..71387811b281 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -331,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
+ rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -351,11 +351,11 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
+ rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
- req->need_inv = false;
+ req->mr->need_inval = false;
if (req->need_inv_comp)
complete(&req->inv_comp);
else
@@ -391,12 +391,13 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
clt_path = to_clt_path(con->c.path);
if (req->sg_cnt) {
- if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
+ if (req->mr->need_inval) {
/*
- * We are here to invalidate read requests
+ * We are here to invalidate read/write requests
* ourselves. In normal scenario server should
- * send INV for all read requests, but
- * we are here, thus two things could happen:
+ * send INV for all read requests, we do local
+ * invalidate for write requests ourselves, but
+ * we are here, thus three things could happen:
*
* 1. this is failover, when errno != 0
* and can_wait == 1,
@@ -404,6 +405,9 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
* 2. something totally bad happened and
* server forgot to send INV, so we
* should do that ourselves.
+ *
+ * 3. write request finishes, we need to do local
+ * invalidate
*/
if (can_wait) {
@@ -418,18 +422,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (err) {
- rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
+ rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
} else if (can_wait) {
wait_for_completion(&req->inv_comp);
- } else {
- /*
- * Something went wrong, so request will be
- * completed from INV callback.
- */
- WARN_ON_ONCE(1);
-
- return;
}
if (!refcount_dec_and_test(&req->ref))
return;
@@ -446,8 +442,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
- errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ rtrs_err_rl(con->c.path,
+ "IO %s request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ req->dir == DMA_TO_DEVICE ? "write" : "read", errno,
+ kobject_name(&clt_path->kobj), clt_path->hca_name,
clt_path->hca_port, notify);
}
@@ -501,7 +499,7 @@ static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
req = &clt_path->reqs[msg_id];
/* Drop need_inv if server responded with send with invalidation */
- req->need_inv &= !w_inval;
+ req->mr->need_inval &= !w_inval;
complete_rdma_req(req, errno, true, false);
}
@@ -626,6 +624,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
*/
if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
return;
+ clt_path->s.hb_missed_cnt = 0;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
if (imm_type == RTRS_IO_RSP_IMM ||
@@ -643,7 +642,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- clt_path->s.hb_missed_cnt = 0;
clt_path->s.hb_cur_latency =
ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
@@ -670,6 +668,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
/*
* Key invalidations from server side
*/
+ clt_path->s.hb_missed_cnt = 0;
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
@@ -967,7 +966,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->dir = dir;
req->con = rtrs_permit_to_clt_con(clt_path, permit);
req->conf = conf;
- req->need_inv = false;
+ req->mr->need_inval = false;
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
@@ -1089,7 +1088,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
int ret, count = 0;
u32 imm, buf_id;
struct ib_reg_wr rwr;
- struct ib_send_wr inv_wr;
struct ib_send_wr *wr = NULL;
bool fr_en = false;
@@ -1130,13 +1128,6 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
req->sg_cnt, req->dir);
return ret;
}
- inv_wr = (struct ib_send_wr) {
- .opcode = IB_WR_LOCAL_INV,
- .wr_cqe = &req->inv_cqe,
- .send_flags = IB_SEND_SIGNALED,
- .ex.invalidate_rkey = req->mr->rkey,
- };
- req->inv_cqe.done = rtrs_clt_inv_rkey_done;
rwr = (struct ib_reg_wr) {
.wr.opcode = IB_WR_REG_MR,
.wr.wr_cqe = &fast_reg_cqe,
@@ -1146,7 +1137,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
};
wr = &rwr.wr;
fr_en = true;
- refcount_inc(&req->ref);
+ req->mr->need_inval = true;
}
/*
* Update stats now, after request is successfully sent it is not
@@ -1156,7 +1147,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
- imm, wr, &inv_wr);
+ imm, wr, NULL);
if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
@@ -1164,6 +1155,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
+ if (req->mr->need_inval) {
+ req->mr->need_inval = false;
+ refcount_dec(&req->ref);
+ }
if (req->sg_cnt)
ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
@@ -1213,7 +1208,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
ret = rtrs_map_sg_fr(req, count);
if (ret < 0) {
rtrs_err_rl(s,
- "Read request failed, failed to map fast reg. data, err: %d\n",
+ "Read request failed, failed to map fast reg. data, err: %d\n",
ret);
ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
req->dir);
@@ -1237,7 +1232,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
msg->desc[0].len = cpu_to_le32(req->mr->length);
/* Further invalidation is required */
- req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+ req->mr->need_inval = !!RTRS_MSG_NEED_INVAL_F;
} else {
msg->sg_cnt = 0;
@@ -1270,7 +1265,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
- req->need_inv = false;
+ req->mr->need_inval = false;
if (req->sg_cnt)
ib_dma_unmap_sg(dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
@@ -1494,7 +1489,9 @@ static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
{
struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj));
rtrs_rdma_error_recovery(con);
}
@@ -2346,6 +2343,12 @@ static int init_conns(struct rtrs_clt_path *clt_path)
if (err)
goto destroy;
}
+
+ /*
+ * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds.
+ */
+ cid = clt_path->s.con_num - 1;
+
err = alloc_path_reqs(clt_path);
if (err)
goto destroy;
@@ -3140,8 +3143,20 @@ close_path:
return err;
}
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent)
+{
+ pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event),
+ ibevent->event);
+}
+
+
static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
{
+ INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
+ rtrs_clt_ib_event_handler);
+ ib_register_event_handler(&dev->event_handler);
+
if (!(dev->ib_dev->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS)) {
pr_err("Memory registrations not supported.\n");
@@ -3151,8 +3166,15 @@ static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
return 0;
}
+static void rtrs_clt_ib_dev_deinit(struct rtrs_ib_dev *dev)
+{
+ ib_unregister_event_handler(&dev->event_handler);
+}
+
+
static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
- .init = rtrs_clt_ib_dev_init
+ .init = rtrs_clt_ib_dev_init,
+ .deinit = rtrs_clt_ib_dev_deinit
};
static int __init rtrs_client_init(void)
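
Note on the rtrs-clt.c hunks above: the per-request need_inv flag moves into
req->mr->need_inval, write requests no longer chain an IB_WR_LOCAL_INV behind
the RDMA write, and complete_rdma_req() now performs the local invalidation
for writes as well as for reads the server did not invalidate. For context, a
hedged sketch of the pre-existing rtrs_inv_rkey() helper that
complete_rdma_req() posts; its actual body lives earlier in rtrs-clt.c and may
differ in detail.

/* Sketch: post a signalled IB_WR_LOCAL_INV for the request's MR rkey; the
 * completion is handled by rtrs_clt_inv_rkey_done() shown in the diff above.
 */
static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
{
	struct rtrs_clt_con *con = req->con;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_cqe		    = &req->inv_cqe,
		.send_flags	    = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = req->mr->rkey,
	};

	req->inv_cqe.done = rtrs_clt_inv_rkey_done;
	return ib_post_send(con->c.qp, &wr, NULL);
}
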
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index f848c0392d98..0f57759b3080 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -115,7 +115,6 @@ struct rtrs_clt_io_req {
struct completion inv_comp;
int inv_errno;
bool need_inv_comp;
- bool need_inv;
refcount_t ref;
};
@@ -213,6 +212,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
void free_path(struct rtrs_clt_path *clt_path);
+void rtrs_clt_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent);
/* rtrs-clt-stats.c */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index ab25619261d2..ef29bd483b5a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -69,6 +69,7 @@ struct rtrs_ib_dev;
struct rtrs_rdma_dev_pd_ops {
int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
};
struct rtrs_rdma_dev_pd {
@@ -84,6 +85,7 @@ struct rtrs_ib_dev {
struct kref ref;
struct list_head entry;
struct rtrs_rdma_dev_pd *pool;
+ struct ib_event_handler event_handler;
};
struct rtrs_con {
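
The rtrs-pri.h hunks above only add the hook points: a .deinit callback paired
with .init in rtrs_rdma_dev_pd_ops, and an event_handler embedded in
rtrs_ib_dev. The pool code that invokes them lives in rtrs.c, outside this
diffstat. A minimal sketch of the expected pairing, with a hypothetical helper
name:

/* Hypothetical release path for a pooled device: .deinit undoes whatever
 * .init set up (here, the IB event handler) before the device is freed.
 * PD and pool-list handling is elided.
 */
static void example_rtrs_ib_dev_release(struct rtrs_ib_dev *dev)
{
	if (dev->pool->ops && dev->pool->ops->deinit)
		dev->pool->ops->deinit(dev);
	/* ...deallocate the PD, unlink dev from the pool, kfree(dev)... */
}
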
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 1d33efb8fb03..e83d95647852 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -26,7 +26,10 @@ MODULE_LICENSE("GPL");
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE
-static struct rtrs_rdma_dev_pd dev_pd;
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
const struct class rtrs_dev_class = {
.name = "rtrs-server",
};
@@ -672,6 +675,10 @@ err:
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
+ struct rtrs_srv_con *con = container_of(c, typeof(*con), c);
+ struct rtrs_srv_path *srv_path = to_srv_path(con->c.path);
+
+ rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&srv_path->kobj));
close_path(to_srv_path(c->path));
}
@@ -931,12 +938,11 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
if (err)
goto close;
-out:
rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
return;
close:
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
close_path(srv_path);
- goto out;
}
static int post_recv_info_req(struct rtrs_srv_con *con)
@@ -987,6 +993,16 @@ static int post_recv_path(struct rtrs_srv_path *srv_path)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = srv->queue_depth;
+ if (srv_path->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Path state invalid. state %s\n",
+ rtrs_srv_state_str(srv_path->state));
+ return -EIO;
+ }
+
+ if (!srv_path->s.con[cid]) {
+ rtrs_err(s, "Conn not set for %d\n", cid);
+ return -EIO;
+ }
err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
if (err) {
@@ -1229,6 +1245,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
*/
if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
return;
+ srv_path->s.hb_missed_cnt = 0;
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
@@ -2255,6 +2272,34 @@ static int check_module_params(void)
return 0;
}
+void rtrs_srv_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent)
+{
+ pr_info("Handling event: %s (%d).\n", ib_event_msg(ibevent->event),
+ ibevent->event);
+}
+
+static int rtrs_srv_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ INIT_IB_EVENT_HANDLER(&dev->event_handler, dev->ib_dev,
+ rtrs_srv_ib_event_handler);
+ ib_register_event_handler(&dev->event_handler);
+
+ return 0;
+}
+
+static void rtrs_srv_ib_dev_deinit(struct rtrs_ib_dev *dev)
+{
+ ib_unregister_event_handler(&dev->event_handler);
+}
+
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_srv_ib_dev_init,
+ .deinit = rtrs_srv_ib_dev_deinit
+};
+
+
static int __init rtrs_server_init(void)
{
int err;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 5e325b82ff33..014f85681f37 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -132,6 +132,8 @@ struct rtrs_srv_ib_ctx {
extern const struct class rtrs_dev_class;
void close_path(struct rtrs_srv_path *srv_path);
+void rtrs_srv_ib_event_handler(struct ib_event_handler *handler,
+ struct ib_event *ibevent);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9632afbd727b..5dfb4644446b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
+static DEFINE_MUTEX(srpt_mc_mutex); /* Protects srpt_memory_caches. */
+static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */
static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
+/* Type of the entries in srpt_memory_caches. */
+struct srpt_memory_cache_entry {
+ refcount_t ref;
+ struct kmem_cache *c;
+};
+
+static struct kmem_cache *srpt_cache_get(unsigned int object_size)
+{
+ struct srpt_memory_cache_entry *e;
+ char name[32];
+ void *res;
+
+ guard(mutex)(&srpt_mc_mutex);
+ e = xa_load(&srpt_memory_caches, object_size);
+ if (e) {
+ refcount_inc(&e->ref);
+ return e->c;
+ }
+ snprintf(name, sizeof(name), "srpt-%u", object_size);
+ e = kmalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return NULL;
+ refcount_set(&e->ref, 1);
+ e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
+ if (!e->c)
+ goto free_entry;
+ res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
+ if (xa_is_err(res))
+ goto destroy_cache;
+ return e->c;
+
+destroy_cache:
+ kmem_cache_destroy(e->c);
+
+free_entry:
+ kfree(e);
+ return NULL;
+}
+
+static void srpt_cache_put(struct kmem_cache *c)
+{
+ struct srpt_memory_cache_entry *e = NULL;
+ unsigned long object_size;
+
+ guard(mutex)(&srpt_mc_mutex);
+ xa_for_each(&srpt_memory_caches, object_size, e)
+ if (e->c == c)
+ break;
+ if (WARN_ON_ONCE(!e))
+ return;
+ if (!refcount_dec_and_test(&e->ref))
+ return;
+ WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
+ kmem_cache_destroy(e->c);
+ kfree(e);
+}
+
/*
* The only allowed channel state changes are those that change the channel
* state into a state with a higher numerical value. Hence the new > prev test.
@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
ch->sport->sdev, ch->rq_size,
ch->rsp_buf_cache, DMA_TO_DEVICE);
- kmem_cache_destroy(ch->rsp_buf_cache);
+ srpt_cache_put(ch->rsp_buf_cache);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
sdev, ch->rq_size,
ch->req_buf_cache, DMA_FROM_DEVICE);
- kmem_cache_destroy(ch->req_buf_cache);
+ srpt_cache_put(ch->req_buf_cache);
kref_put(&ch->kref, srpt_free_ch);
}
@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
INIT_LIST_HEAD(&ch->cmd_wait_list);
ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
- ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
- 512, 0, NULL);
+ ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
if (!ch->rsp_buf_cache)
goto free_ch;
@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
alignment_offset = round_up(imm_data_offset, 512) -
imm_data_offset;
req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
- ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
- 512, 0, NULL);
+ ch->req_buf_cache = srpt_cache_get(req_sz);
if (!ch->req_buf_cache)
goto free_rsp_ring;
@@ -2478,7 +2535,7 @@ free_recv_ring:
ch->req_buf_cache, DMA_FROM_DEVICE);
free_recv_cache:
- kmem_cache_destroy(ch->req_buf_cache);
+ srpt_cache_put(ch->req_buf_cache);
free_rsp_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2486,7 +2543,7 @@ free_rsp_ring:
ch->rsp_buf_cache, DMA_TO_DEVICE);
free_rsp_cache:
- kmem_cache_destroy(ch->rsp_buf_cache);
+ srpt_cache_put(ch->rsp_buf_cache);
free_ch:
if (rdma_cm_id)
@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
sdev->srq_size, sdev->req_buf_cache,
DMA_FROM_DEVICE);
- kmem_cache_destroy(sdev->req_buf_cache);
+ srpt_cache_put(sdev->req_buf_cache);
sdev->srq = NULL;
}
@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
- sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
- srp_max_req_size, 0, 0, NULL);
+ sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
if (!sdev->req_buf_cache)
goto free_srq;
@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
return 0;
free_cache:
- kmem_cache_destroy(sdev->req_buf_cache);
+ srpt_cache_put(sdev->req_buf_cache);
free_srq:
ib_destroy_srq(srq);
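
The srpt hunks above replace the per-channel kmem_cache_create()/destroy()
calls with srpt_cache_get()/srpt_cache_put(): caches are keyed by object size
in the srpt_memory_caches xarray and shared via a refcount, so two channels
with the same buffer size reuse one cache, which is destroyed only when the
last user drops its reference. A hedged usage sketch (the function and the
4096-byte size are illustrative, not from the patch):

/* Illustrative only: two users of the same object size share one cache. */
static void srpt_cache_sharing_example(void)
{
	struct kmem_cache *a, *b;

	a = srpt_cache_get(4096);	/* creates "srpt-4096", ref = 1 */
	if (!a)
		return;
	b = srpt_cache_get(4096);	/* same cache returned, ref = 2 */
	WARN_ON(a != b);

	srpt_cache_put(b);		/* ref = 1, cache kept */
	srpt_cache_put(a);		/* ref = 0, erased and destroyed */
}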