Diffstat (limited to 'drivers/infiniband')
47 files changed, 3014 insertions, 194 deletions
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a0f29c1d03bc..c85b56c28099 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -59,5 +59,6 @@ source "drivers/infiniband/ulp/srp/Kconfig"
 source "drivers/infiniband/ulp/srpt/Kconfig"
 source "drivers/infiniband/ulp/iser/Kconfig"
+source "drivers/infiniband/ulp/isert/Kconfig"
 
 endif # INFINIBAND
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index bf846a14b9d3..b126fefe0b1c 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
 obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
 obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
+obj-$(CONFIG_INFINIBAND_ISERT) += ulp/isert/
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 0bb99bb38809..c47c2034ca71 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -878,6 +878,8 @@ static void cm_work_handler(struct work_struct *_work)
 			}
 			return;
 		}
+		if (empty)
+			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a8fdd3381405..22192deb8828 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -348,7 +348,8 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 	struct ib_qp *qp = context;
 
 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
-		event->element.qp->event_handler(event, event->element.qp->qp_context);
+		if (event->element.qp->event_handler)
+			event->element.qp->event_handler(event, event->element.qp->qp_context);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index ba7a1208ff9e..d619d735838b 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -265,7 +265,6 @@ struct c2_pd_table {
 struct c2_qp_table {
 	struct idr idr;
 	spinlock_t lock;
-	int last;
 };
 
 struct c2_element {
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 0ab826b280b2..86708dee58b1 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -385,8 +385,7 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
 	idr_preload(GFP_KERNEL);
 	spin_lock_irq(&c2dev->qp_table.lock);
 
-	ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
-			GFP_NOWAIT);
+	ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
 	if (ret >= 0)
 		qp->qpn = ret;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 31f9201b2980..c40088ecf9f3 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -62,13 +62,13 @@ static int __cxio_init_resource_fifo(struct kfifo *fifo,
 		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
 	if (random) {
 		j = 0;
-		random_bytes = random32();
+		random_bytes = prandom_u32();
 		for (i = 0; i < RANDOM_SIZE; i++)
 			rarray[i] = i + skip_low;
 		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
 			if (j >= RANDOM_SIZE) {
 				j = 0;
-				random_bytes = random32();
+				random_bytes = prandom_u32();
 			}
 			idx = (random_bytes >> (j * 2)) & 0xF;
 			kfifo_in(fifo,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9c12da0cbd32..e87f2201b220 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -559,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	__be64 *page_list = NULL;
 	int shift = 0;
 	u64 total_size;
-	int npages;
+	int npages = 0;
 	int ret;
 
 	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 565bfb161c1a..65c30ea8c1a1 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
+	struct cpl_t5_act_open_req *t5_req;
 	struct sk_buff *skb;
 	u64 opt0;
 	u32 opt2;
 	unsigned int mtu_idx;
 	int wscale;
-	int wrlen = roundup(sizeof *req, 16);
+	int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+		sizeof(struct cpl_act_open_req) :
+		sizeof(struct cpl_t5_act_open_req);
+	int wrlen = roundup(size, 16);
 
 	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
 	opt2 |= WND_SCALE_EN(1);
 	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
-	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
-	INIT_TP_WR(req, 0);
-	OPCODE_TID(req) = cpu_to_be32(
-		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
-	req->local_port = ep->com.local_addr.sin_port;
-	req->peer_port = ep->com.remote_addr.sin_port;
-	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
-	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
-	req->opt0 = cpu_to_be64(opt0);
-	req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
-	req->opt2 = cpu_to_be32(opt2);
+	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+		req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(
+				MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+				((ep->rss_qid << 14) | ep->atid)));
+		req->local_port = ep->com.local_addr.sin_port;
+		req->peer_port = ep->com.remote_addr.sin_port;
+		req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+		req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+		req->opt0 = cpu_to_be64(opt0);
+		req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+					ep->dst, ep->l2t));
+		req->opt2 = cpu_to_be32(opt2);
+	} else {
+		t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
+		INIT_TP_WR(t5_req, 0);
+		OPCODE_TID(t5_req) = cpu_to_be32(
+				MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+				((ep->rss_qid << 14) | ep->atid)));
+		t5_req->local_port = ep->com.local_addr.sin_port;
+		t5_req->peer_port = ep->com.remote_addr.sin_port;
+		t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+		t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+		t5_req->opt0 = cpu_to_be64(opt0);
+		t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+				select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
+		t5_req->opt2 = cpu_to_be32(opt2);
+	}
+
 	set_bit(ACT_OPEN_REQ, &ep->com.history);
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
@@ -1575,6 +1598,12 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 
 	neigh = dst_neigh_lookup(ep->dst,
			&ep->com.cm_id->remote_addr.sin_addr.s_addr);
+	if (!neigh) {
+		pr_err("%s - cannot alloc neigh.\n", __func__);
+		err = -ENOMEM;
+		goto fail4;
+	}
+
 	/* get a l2t entry */
 	if (neigh->dev->flags & IFF_LOOPBACK) {
 		PDBG("%s LOOPBACK\n", __func__);
@@ -1670,9 +1699,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	case CPL_ERR_CONN_TIMEDOUT:
 		break;
 	case CPL_ERR_TCAM_FULL:
+		dev->rdev.stats.tcam_full++;
 		if (dev->rdev.lldi.enable_fw_ofld_conn) {
 			mutex_lock(&dev->rdev.stats.lock);
-			dev->rdev.stats.tcam_full++;
 			mutex_unlock(&dev->rdev.stats.lock);
 			send_fw_act_open_req(ep,
 					     GET_TID_TID(GET_AOPEN_ATID(
@@ -2869,12 +2898,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 {
 	u32 l2info;
-	u16 vlantag, len, hdr_len;
+	u16 vlantag, len, hdr_len, eth_hdr_len;
 	u8 intf;
 	struct cpl_rx_pkt *cpl = cplhdr(skb);
 	struct cpl_pass_accept_req *req;
 	struct tcp_options_received tmp_opt;
+	struct c4iw_dev *dev;
 
+	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
 	/* Store values from cpl_rx_pkt in temporary location. */
 	vlantag = (__force u16) cpl->vlan;
 	len = (__force u16) cpl->len;
@@ -2890,7 +2921,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	 */
 	memset(&tmp_opt, 0, sizeof(tmp_opt));
 	tcp_clear_options(&tmp_opt);
-	tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
+	tcp_parse_options(skb, &tmp_opt, 0, NULL);
 
 	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
@@ -2898,14 +2929,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 			 V_SYN_MAC_IDX(G_RX_MACIDX(
 			 (__force int) htonl(l2info))) |
 			 F_SYN_XACT_MATCH);
+	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+			    G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
+			    G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
 	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
 					(__force int) htonl(l2info))) |
 				   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
 					(__force int) htons(hdr_len))) |
 				   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
 					(__force int) htons(hdr_len))) |
-				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
-					(__force int) htonl(l2info))));
+				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
 	req->vlan = (__force __be16) vlantag;
 	req->len = (__force __be16) len;
 	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2993,7 +3026,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	u16 window;
 	struct port_info *pi;
 	struct net_device *pdev;
-	u16 rss_qid;
+	u16 rss_qid, eth_hdr_len;
 	int step;
 	u32 tx_chan;
 	struct neighbour *neigh;
@@ -3022,7 +3055,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+		G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
+		G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+	if (eth_hdr_len == ETH_HLEN) {
 		eh = (struct ethhdr *)(req + 1);
 		iph = (struct iphdr *)(eh + 1);
 	} else {
@@ -3053,6 +3089,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	dst = &rt->dst;
 	neigh = dst_neigh_lookup_skb(dst, skb);
 
+	if (!neigh) {
+		pr_err("%s - failed to allocate neigh!\n",
+		       __func__);
+		goto free_dst;
+	}
+
 	if (neigh->dev->flags & IFF_LOOPBACK) {
 		pdev = ip_dev_find(&init_net, iph->daddr);
 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c1..ae656016e1ae 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,10 +41,20 @@
 #define DRV_VERSION "0.1"
 
 MODULE_AUTHOR("Steve Wise");
-MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static int allow_db_fc_on_t5;
+module_param(allow_db_fc_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_fc_on_t5,
+		 "Allow DB Flow Control on T5 (default = 0)");
+
+static int allow_db_coalescing_on_t5;
+module_param(allow_db_coalescing_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_coalescing_on_t5,
+		 "Allow DB Coalescing on T5 (default = 0)");
+
 struct uld_ctx {
 	struct list_head entry;
 	struct cxgb4_lld_info lldi;
@@ -614,7 +624,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
 {
 	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
 	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
-	       infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+	       infop->vr->cq.size > 0;
 }
 
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +637,22 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 			pci_name(infop->pdev));
 		return ERR_PTR(-ENOSYS);
 	}
+	if (!ocqp_supported(infop))
+		pr_info("%s: On-Chip Queues not supported on this device.\n",
+			pci_name(infop->pdev));
+
+	if (!is_t4(infop->adapter_type)) {
+		if (!allow_db_fc_on_t5) {
+			db_fc_threshold = 100000;
+			pr_info("DB Flow Control Disabled.\n");
+		}
+
+		if (!allow_db_coalescing_on_t5) {
+			db_coalescing_threshold = -1;
+			pr_info("DB Coalescing Disabled.\n");
+		}
+	}
+
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +704,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 	int i;
 
 	if (!vers_printed++)
-		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
-		       DRV_VERSION);
+		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
+			DRV_VERSION);
 
 	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
 	if (!ctx) {
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index f95e5df30db2..0161ae6ad629 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
 
 	if (obj < alloc->max) {
 		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
-			alloc->last += random32() % RANDOM_SKIP;
+			alloc->last += prandom_u32() % RANDOM_SKIP;
 		else
 			alloc->last = obj + 1;
 		if (alloc->last >= alloc->max)
@@ -88,7 +88,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
 	alloc->start = start;
 	alloc->flags = flags;
 	if (flags & C4IW_ID_TABLE_F_RANDOM)
-		alloc->last = random32() % RANDOM_SKIP;
+		alloc->last = prandom_u32() % RANDOM_SKIP;
 	else
 		alloc->last = 0;
 	alloc->max = num;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8c..485183ad34cd 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -162,7 +162,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 	return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
 }
 
-#define C4IW_WR_TO (10*HZ)
+#define C4IW_WR_TO (30*HZ)
 
 struct c4iw_wr_wait {
 	struct completion completion;
@@ -369,7 +369,6 @@ struct c4iw_fr_page_list {
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	dma_addr_t dma_addr;
 	struct c4iw_dev *dev;
-	int size;
 };
 
 static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -817,6 +816,15 @@ static inline int compute_wscale(int win)
 	return wscale;
 }
 
+static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+	return infop->vr->ocq.size > 0;
+#else
+	return 0;
+#endif
+}
+
 u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
 void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
 int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
@@ -930,6 +938,8 @@ extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 extern int c4iw_max_read_depth;
 extern int db_fc_threshold;
+extern int db_coalescing_threshold;
+extern int use_dsgl;
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 903a92d6f91d..4cb8eb24497c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -30,16 +30,76 @@
  * SOFTWARE.
  */
 
+#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <rdma/ib_umem.h>
 #include <linux/atomic.h>
 
 #include "iw_cxgb4.h"
 
+int use_dsgl = 1;
+module_param(use_dsgl, int, 0644);
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
+
 #define T4_ULPTX_MIN_IO 32
 #define C4IW_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+#define C4IW_INLINE_THRESHOLD 128
 
-static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
-			     void *data)
+static int inline_threshold = C4IW_INLINE_THRESHOLD;
+module_param(inline_threshold, int, 0644);
+MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+
+static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
+				       u32 len, dma_addr_t data, int wait)
+{
+	struct sk_buff *skb;
+	struct ulp_mem_io *req;
+	struct ulptx_sgl *sgl;
+	u8 wr_len;
+	int ret = 0;
+	struct c4iw_wr_wait wr_wait;
+
+	addr &= 0x7FFFFFF;
+
+	if (wait)
+		c4iw_init_wr_wait(&wr_wait);
+	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
+
+	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	if (!skb)
+		return -ENOMEM;
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+	memset(req, 0, wr_len);
+	INIT_ULPTX_WR(req, wr_len, 0, 0);
+	req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+			(wait ? FW_WR_COMPL(1) : 0));
+	req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
+	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+	req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
+	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+
+	sgl = (struct ulptx_sgl *)(req + 1);
+	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+				    ULPTX_NSGE(1));
+	sgl->len0 = cpu_to_be32(len);
+	sgl->addr0 = cpu_to_be64(data);
+
+	ret = c4iw_ofld_send(rdev, skb);
+	if (ret)
+		return ret;
+	if (wait)
+		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+	return ret;
+}
+
+static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
+				  void *data)
 {
 	struct sk_buff *skb;
 	struct ulp_mem_io *req;
@@ -47,6 +107,12 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	u8 wr_len, *to_dp, *from_dp;
 	int copy_len, num_wqe, i, ret = 0;
 	struct c4iw_wr_wait wr_wait;
+	__be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+
+	if (is_t4(rdev->lldi.adapter_type))
+		cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+	else
+		cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
 
 	addr &= 0x7FFFFFF;
 	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -77,7 +143,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		req->wr.wr_mid = cpu_to_be32(
 				       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
 
-		req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+		req->cmd = cmd;
 		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
 				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
 		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
@@ -107,6 +173,67 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	return ret;
 }
 
+int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+	u32 remain = len;
+	u32 dmalen;
+	int ret = 0;
+	dma_addr_t daddr;
+	dma_addr_t save;
+
+	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+		return -1;
+	save = daddr;
+
+	while (remain > inline_threshold) {
+		if (remain < T4_ULPTX_MAX_DMA) {
+			if (remain & ~T4_ULPTX_MIN_IO)
+				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
+			else
+				dmalen = remain;
+		} else
+			dmalen = T4_ULPTX_MAX_DMA;
+		remain -= dmalen;
+		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
+						  !remain);
+		if (ret)
+			goto out;
+		addr += dmalen >> 5;
+		data += dmalen;
+		daddr += dmalen;
+	}
+	if (remain)
+		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+out:
+	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
+	return ret;
+}
+
+/*
+ * write len bytes of data into addr (32B aligned address)
+ * If data is NULL, clear len byte of memory to zero.
+ */
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+			     void *data)
+{
+	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
+		if (len > inline_threshold) {
+			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+				printk_ratelimited(KERN_WARNING
+						   "%s: dma map"
+						   " failure (non fatal)\n",
+						   pci_name(rdev->lldi.pdev));
+				return _c4iw_write_mem_inline(rdev, addr, len,
+							      data);
+			} else
+				return 0;
+		} else
+			return _c4iw_write_mem_inline(rdev, addr, len, data);
+	} else
+		return _c4iw_write_mem_inline(rdev, addr, len, data);
+}
+
 /*
  * Build and write a TPT entry.
  * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
@@ -760,19 +887,23 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
 	struct c4iw_fr_page_list *c4pl;
 	struct c4iw_dev *dev = to_c4iw_dev(device);
 	dma_addr_t dma_addr;
-	int size = sizeof *c4pl + page_list_len * sizeof(u64);
+	int pll_len = roundup(page_list_len * sizeof(u64), 32);
 
-	c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
-				  &dma_addr, GFP_KERNEL);
+	c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
 	if (!c4pl)
 		return ERR_PTR(-ENOMEM);
 
+	c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
+						  pll_len, &dma_addr,
+						  GFP_KERNEL);
+	if (!c4pl->ibpl.page_list) {
+		kfree(c4pl);
+		return ERR_PTR(-ENOMEM);
+	}
 	dma_unmap_addr_set(c4pl, mapping, dma_addr);
 	c4pl->dma_addr = dma_addr;
 	c4pl->dev = dev;
-	c4pl->size = size;
-	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
-	c4pl->ibpl.max_page_list_len = page_list_len;
+	c4pl->ibpl.max_page_list_len = pll_len;
 
 	return &c4pl->ibpl;
 }
@@ -781,8 +912,10 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
 {
 	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
 
-	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
-			  c4pl, dma_unmap_addr(c4pl, mapping));
+	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
+			  c4pl->ibpl.max_page_list_len,
+			  c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
+	kfree(c4pl);
 }
 
 int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7f..7e94c9a656a1 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		 */
 		if (addr >= rdev->oc_mw_pa)
 			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
-		else
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		else {
+			if (is_t5(rdev->lldi.adapter_type))
+				vma->vm_page_prot =
+					t4_pgprot_wc(vma->vm_page_prot);
+			else
+				vma->vm_page_prot =
+					pgprot_noncached(vma->vm_page_prot);
+		}
 		ret = io_remap_pfn_range(vma, vma->vm_start,
 					 addr >> PAGE_SHIFT,
 					 len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
 	dev = to_c4iw_dev(ibdev);
 	memset(props, 0, sizeof *props);
 	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
-	props->hw_ver = dev->rdev.lldi.adapter_type;
+	props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
 	props->fw_ver = dev->rdev.lldi.fw_vers;
 	props->device_cap_flags = dev->device_cap_flags;
 	props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev.dev);
 	PDBG("%s dev 0x%p\n", __func__, dev);
-	return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+	return sprintf(buf, "%d\n",
+		       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
 }
 
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 17ba4f8bc12d..232040447e8a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -42,10 +42,21 @@ static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
 
-int db_fc_threshold = 2000;
+int db_fc_threshold = 1000;
 module_param(db_fc_threshold, int, 0644);
-MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
-		 "db flow control mode (default = 2000)");
+MODULE_PARM_DESC(db_fc_threshold,
+		 "QP count/threshold that triggers"
+		 " automatic db flow control mode (default = 1000)");
+
+int db_coalescing_threshold;
+module_param(db_coalescing_threshold, int, 0644);
+MODULE_PARM_DESC(db_coalescing_threshold,
+		 "QP count/threshold that triggers"
+		 " disabling db coalescing (default = 0)");
+
+static int max_fr_immd = T4_MAX_FR_IMMD;
+module_param(max_fr_immd, int, 0644);
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
 
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
@@ -76,7 +87,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 
 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 {
-	if (!ocqp_support || !t4_ocqp_supported())
+	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
 		return -ENOSYS;
 	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
 	if (!sq->dma_addr)
@@ -100,6 +111,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
 	return 0;
 }
 
+static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+{
+	int ret = -ENOSYS;
+	if (user)
+		ret = alloc_oc_sq(rdev, sq);
+	if (ret)
+		ret = alloc_host_sq(rdev, sq);
+	return ret;
+}
+
 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		      struct c4iw_dev_ucontext *uctx)
 {
@@ -129,7 +150,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	int wr_len;
 	struct c4iw_wr_wait wr_wait;
 	struct sk_buff *skb;
-	int ret;
+	int ret = 0;
 	int eqsize;
 
 	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -168,26 +189,19 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		goto free_sw_rq;
 	}
 
-	if (user) {
-		ret = alloc_oc_sq(rdev, &wq->sq);
-		if (ret)
-			goto free_hwaddr;
-
-		ret = alloc_host_sq(rdev, &wq->sq);
-		if (ret)
-			goto free_sq;
-	} else
-		ret = alloc_host_sq(rdev, &wq->sq);
-	if (ret)
-		goto free_hwaddr;
+	ret = alloc_sq(rdev, &wq->sq, user);
+	if (ret)
+		goto free_hwaddr;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
 	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
 	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
 					  wq->rq.memsize, &(wq->rq.dma_addr),
 					  GFP_KERNEL);
-	if (!wq->rq.queue)
+	if (!wq->rq.queue) {
+		ret = -ENOMEM;
 		goto free_sq;
+	}
 	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
 		__func__, wq->sq.queue,
 		(unsigned long long)virt_to_phys(wq->sq.queue),
@@ -532,7 +546,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 }
 
 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
-			 struct ib_send_wr *wr, u8 *len16)
+			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
 {
 
 	struct fw_ri_immd *imdp;
@@ -554,28 +568,51 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
 	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
 	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
 					0xffffffff);
-	WARN_ON(pbllen > T4_MAX_FR_IMMD);
-	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
-	imdp->op = FW_RI_DATA_IMMD;
-	imdp->r1 = 0;
-	imdp->r2 = 0;
-	imdp->immdlen = cpu_to_be32(pbllen);
-	p = (__be64 *)(imdp + 1);
-	rem = pbllen;
-	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
-		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
-		rem -= sizeof *p;
-		if (++p == (__be64 *)&sq->queue[sq->size])
-			p = (__be64 *)sq->queue;
-	}
-	BUG_ON(rem < 0);
-	while (rem) {
-		*p = 0;
-		rem -= sizeof *p;
-		if (++p == (__be64 *)&sq->queue[sq->size])
-			p = (__be64 *)sq->queue;
+
+	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+		struct c4iw_fr_page_list *c4pl =
+			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+		struct fw_ri_dsgl *sglp;
+
+		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
+				cpu_to_be64((u64)
+				wr->wr.fast_reg.page_list->page_list[i]);
+		}
+
+		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+		sglp->op = FW_RI_DATA_DSGL;
+		sglp->r1 = 0;
+		sglp->nsge = cpu_to_be16(1);
+		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+		sglp->len0 = cpu_to_be32(pbllen);
+
+		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+	} else {
+		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+		imdp->op = FW_RI_DATA_IMMD;
+		imdp->r1 = 0;
+		imdp->r2 = 0;
+		imdp->immdlen = cpu_to_be32(pbllen);
+		p = (__be64 *)(imdp + 1);
+		rem = pbllen;
+		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+			*p = cpu_to_be64(
+				(u64)wr->wr.fast_reg.page_list->page_list[i]);
+			rem -= sizeof(*p);
+			if (++p == (__be64 *)&sq->queue[sq->size])
+				p = (__be64 *)sq->queue;
+		}
+		BUG_ON(rem < 0);
+		while (rem) {
+			*p = 0;
+			rem -= sizeof(*p);
+			if (++p == (__be64 *)&sq->queue[sq->size])
+				p = (__be64 *)sq->queue;
+		}
+		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) +
+				      pbllen, 16);
 	}
-	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
 	return 0;
 }
@@ -676,7 +713,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	case IB_WR_FAST_REG_MR:
 		fw_opcode = FW_RI_FR_NSMR_WR;
 		swsqe->opcode = FW_RI_FAST_REGISTER;
-		err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
+		err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
+				    is_t5(
+				    qhp->rhp->rdev.lldi.adapter_type) ?
+				    1 : 0);
 		break;
 	case IB_WR_LOCAL_INV:
 		if (wr->send_flags & IB_SEND_FENCE)
@@ -1448,6 +1488,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 		rhp->db_state = NORMAL;
 		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
 	}
+	if (db_coalescing_threshold >= 0)
+		if (rhp->qpcnt <= db_coalescing_threshold)
+			cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
 	spin_unlock_irq(&rhp->lock);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1559,11 +1602,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	spin_lock_irq(&rhp->lock);
 	if (rhp->db_state != NORMAL)
 		t4_disable_wq_db(&qhp->wq);
-	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+	rhp->qpcnt++;
+	if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
 		rhp->rdev.stats.db_state_transitions++;
 		rhp->db_state = FLOW_CONTROL;
 		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
 	}
+	if (db_coalescing_threshold >= 0)
+		if (rhp->qpcnt > db_coalescing_threshold)
+			cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
 	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 	spin_unlock_irq(&rhp->lock);
 	if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab29302..ebcb03bd1b72 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,7 @@ struct t4_status_page {
 			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
 			sizeof(struct fw_ri_immd)) & ~31UL)
-#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
 #define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
 #endif
 }
 
-static inline int t4_ocqp_supported(void)
-{
-#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
-	return 1;
-#else
-	return 0;
-#endif
-}
-
 enum {
 	T4_SQ_ONCHIP = (1<<0),
 };
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index aed8afee56da..6d7f453b4d05 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <linux/cpu.h>
 #include <asm/pgtable.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 439c35d4a669..44ea9390417c 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
 		goto bail;
 	}
 
-	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
 	dev->opstats[opcode].n_bytes += tlen;
 	dev->opstats[opcode].n_packets++;
@@ -2187,7 +2187,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	if (ret)
 		goto err_reg;
 
-	if (ipath_verbs_register_sysfs(dev))
+	ret = ipath_verbs_register_sysfs(dev);
+	if (ret)
 		goto err_class;
 
 	enable_timer(dd);
@@ -2327,15 +2328,15 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev)
 	int i;
 	int ret;
 
-	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-		if (device_create_file(&dev->dev,
-				       ipath_class_attributes[i])) {
-			ret = 1;
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
+		ret = device_create_file(&dev->dev,
+					 ipath_class_attributes[i]);
+		if (ret)
 			goto bail;
-		}
-
-	ret = 0;
-
+	}
+	return 0;
 bail:
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+		device_remove_file(&dev->dev, ipath_class_attributes[i]);
 	return ret;
 }
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index add98d01476c..d1f5f1dd77b0 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -204,7 +204,6 @@ static struct id_map_entry *
 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 {
 	int ret;
-	static int next_id;
 	struct id_map_entry *ent;
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
@@ -223,9 +222,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 	idr_preload(GFP_KERNEL);
 	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
 
-	ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
+	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
 	if (ret >= 0) {
-		next_id = max(ret + 1, 0);
 		ent->pv_cm_id = (u32)ret;
 		sl_id_map_add(ibdev, ent);
 		list_add_tail(&ent->list, &sriov->cm_list);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index ae67df35dd4d..d5e60f44ba5a 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -33,6 +33,7 @@
 
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/qp.h>
+#include <linux/mlx4/srq.h>
 #include <linux/slab.h>
 
 #include "mlx4_ib.h"
@@ -228,7 +229,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	vector = dev->eq_table[vector % ibdev->num_comp_vectors];
 
 	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
-			    cq->db.dma, &cq->mcq, vector, 0);
+			    cq->db.dma, &cq->mcq, vector, 0, 0);
 	if (err)
 		goto err_dbmap;
@@ -585,6 +586,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	struct mlx4_qp *mqp;
 	struct mlx4_ib_wq *wq;
 	struct mlx4_ib_srq *srq;
+	struct mlx4_srq *msrq = NULL;
 	int is_send;
 	int is_error;
 	u32 g_mlpath_rqpn;
@@ -653,6 +655,20 @@ repoll:
 
 	wc->qp = &(*cur_qp)->ibqp;
 
+	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
+		u32 srq_num;
+		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
+		srq_num = g_mlpath_rqpn & 0xffffff;
+		/* SRQ is also in the radix tree */
+		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
+				       srq_num);
+		if (unlikely(!msrq)) {
+			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
+				cq->mcq.cqn, srq_num);
+			return -EINVAL;
+		}
+	}
+
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
 		if (!(*cur_qp)->sq_signal_bits) {
@@ -666,6 +682,11 @@ repoll:
 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
 		wc->wr_id = srq->wrid[wqe_ctr];
 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+	} else if (msrq) {
+		srq = to_mibsrq(msrq);
+		wqe_ctr = be16_to_cpu(cqe->wqe_index);
+		wc->wr_id = srq->wrid[wqe_ctr];
+		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 	} else {
 		wq = &(*cur_qp)->rq;
 		tail = wq->tail & (wq->wqe_cnt - 1);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 934792c477bc..4d599cedbb0b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -93,7 +93,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
 __be64 mlx4_ib_gen_node_guid(void)
 {
 #define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
-	return cpu_to_be64(NODE_GUID_HI | random32());
+	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
 }
 
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 35cced2a4da8..4f10af2905b5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1292,6 +1292,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+			context->param3 |= cpu_to_be32(1 << 30);
 	}
 
 	if (qp->ibqp.uobject)
@@ -1458,6 +1460,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
+					MLX4_IB_LINK_TYPE_ETH;
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 67647e264611..418004c93feb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2948,7 +2948,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 					nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
 						  nesvnic->netdev->name, vlan_tag);
 
-					__vlan_hwaccel_put_tag(rx_skb, vlan_tag);
+					__vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
 				}
 				if (nes_use_lro)
 					lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 85cf4d1ac442..49eb5111d2cd 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1599,7 +1599,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
 
 	/* Enable/Disable VLAN Stripping */
 	u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
-	if (features & NETIF_F_HW_VLAN_RX)
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 		u32temp &= 0xfdffffff;
 	else
 		u32temp |= 0x02000000;
@@ -1614,10 +1614,10 @@ static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_feat
 	 * Since there is no support for separate rx/tx vlan accel
 	 * enable/disable make sure tx flag is always in same state as rx.
 	 */
-	if (features & NETIF_F_HW_VLAN_RX)
-		features |= NETIF_F_HW_VLAN_TX;
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		features |= NETIF_F_HW_VLAN_CTAG_TX;
 	else
-		features &= ~NETIF_F_HW_VLAN_TX;
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
 	return features;
 }
@@ -1628,7 +1628,7 @@ static int nes_set_features(struct net_device *netdev, netdev_features_t feature
 	struct nes_device *nesdev = nesvnic->nesdev;
 	u32 changed = netdev->features ^ features;
 
-	if (changed & NETIF_F_HW_VLAN_RX)
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 		nes_vlan_mode(netdev, nesdev, features);
 
 	return 0;
@@ -1706,11 +1706,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
 	netdev->dev_addr[4] = (u8)(u64temp>>8);
 	netdev->dev_addr[5] = (u8)u64temp;
 
-	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
+	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
 	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
 		netdev->hw_features |= NETIF_F_TSO;
 
-	netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX;
+	netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
 	netdev->hw_features |= NETIF_F_LRO;
 
 	nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 8349f9c5064c..1e603a375069 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,7 +1,7 @@
 config INFINIBAND_QIB
-	tristate "QLogic PCIe HCA support"
+	tristate "Intel PCIe HCA support"
 	depends on 64BIT
 	---help---
-	This is a low-level driver for QLogic PCIe QLE InfiniBand host
-	channel adapters.  This driver does not support the QLogic
+	This is a low-level driver for Intel PCIe QLE InfiniBand host
+	channel adapters.  This driver does not support the Intel
 	HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5423edcab51f..216092477dfc 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
 		 "Attempt pre-IBTA 1.2 DDR speed negotiation");
 
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic IB driver");
+MODULE_AUTHOR("Intel <ibsupport@intel.com>");
+MODULE_DESCRIPTION("Intel IB driver");
 MODULE_VERSION(QIB_DRIVER_VERSION);
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 4f7aa301b3b1..b56c9428f3c5 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -39,7 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
-#include <linux/uio.h>
+#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <asm/pgtable.h>
 #include <linux/delay.h>
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a099ac171e22..0232ae56b1fa 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
 
 /*
  * This file contains all the chip-specific register information and
- * access functions for the QLogic QLogic_IB PCI-Express chip.
+ * access functions for the Intel Intel_IB PCI-Express chip.
 *
 */
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 50e33aa0b4e3..173f805790da 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
 static void qib_remove_one(struct pci_dev *);
 static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
 
-#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
 #define PFX QIB_DRV_NAME ": "
 
 static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dd = qib_init_iba6120_funcs(pdev, ent);
 #else
 		qib_early_err(&pdev->dev,
-			"QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+			"Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
 			ent->device);
 		dd = ERR_PTR(-ENODEV);
 #endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	default:
 		qib_early_err(&pdev->dev,
-			"Failing on unknown QLogic deviceid 0x%x\n",
+			"Failing on unknown Intel deviceid 0x%x\n",
 			ent->device);
 		ret = -ENODEV;
 	}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 81c7b73695d2..3b9afccaaade 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
 	if (dma_region) {
 		struct qib_mregion *tmr;
 
-		tmr = rcu_dereference(dev->dma_mr);
+		tmr = rcu_access_pointer(dev->dma_mr);
 		if (!tmr) {
 			qib_get_mr(mr);
 			rcu_assign_pointer(dev->dma_mr, mr);
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 50a8a0d4fe67..911205d3d5a0 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 034cc821de5c..3c8e4e3caca6 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -808,10 +808,14 @@ int qib_verbs_register_sysfs(struct qib_devdata *dd)
 	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
 		ret = device_create_file(&dev->dev, qib_attributes[i]);
 		if (ret)
-			return ret;
+			goto bail;
 	}
 
 	return 0;
+bail:
+	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
+		device_remove_file(&dev->dev, qib_attributes[i]);
+	return ret;
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ba51a4715a1d..904c384aa361 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	ibdev->dma_ops = &qib_dma_mapping_ops;
 
 	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
-		 "QLogic Infiniband HCA %s", init_utsname()->nodename);
+		 "Intel Infiniband HCA %s", init_utsname()->nodename);
 
 	ret = ib_register_device(ibdev, qib_create_port_files);
 	if (ret)
@@ -2234,7 +2234,8 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	if (ret)
 		goto err_agents;
 
-	if (qib_verbs_register_sysfs(dd))
+	ret = qib_verbs_register_sysfs(dd);
+	if (ret)
 		goto err_class;
 
 	goto bail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 67b0c1d23678..3eceb61e3532 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -460,7 +460,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 		goto err_qp;
 	}
 
-	psn = random32() & 0xffffff;
+	psn = prandom_u32() & 0xffffff;
 	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
 	if (ret)
 		goto err_modify;
@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 		if (++priv->tx_outstanding == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
 				  tx->qp->qp_num);
-			if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
-				ipoib_warn(priv, "request notify on send CQ failed\n");
 			netif_stop_queue(dev);
+			rc = ib_req_notify_cq(priv->send_cq,
+				IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+			if (rc < 0)
+				ipoib_warn(priv, "request notify on send CQ failed\n");
+			else if (rc)
+				ipoib_send_comp_handler(priv->send_cq, dev);
 		}
 	}
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8534afd04e7c..b6e049a3c7a8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -730,7 +730,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if ((header->proto != htons(ETH_P_IP)) &&
 		    (header->proto != htons(ETH_P_IPV6)) &&
 		    (header->proto != htons(ETH_P_ARP)) &&
-		    (header->proto != htons(ETH_P_RARP))) {
+		    (header->proto != htons(ETH_P_RARP)) &&
+		    (header->proto != htons(ETH_P_TIPC))) {
 			/* ethertype not supported by IPoIB */
 			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
@@ -751,6 +752,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	switch (header->proto) {
 	case htons(ETH_P_IP):
 	case htons(ETH_P_IPV6):
+	case htons(ETH_P_TIPC):
 		neigh = ipoib_neigh_get(dev, cb->hwaddr);
 		if (unlikely(!neigh)) {
 			neigh_add_path(skb, cb->hwaddr, dev);
@@ -828,7 +830,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
 	 */
 	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
 
-	return 0;
+	return sizeof *header;
 }
 
 static void ipoib_set_mcast_list(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0ab8c9cc3a78..2e84ef859c5b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -5,6 +5,7 @@
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * Copyright (c) 2005, 2006 Voltaire, Inc.  All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 * maintained by openib-general@openib.org
 *
 * This software is available to you under a choice of one of two
@@ -82,10 +83,10 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
 int iser_debug_level = 0;
 
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
-		   "v" DRV_VER " (" DRV_DATE ")");
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
 
 module_param_named(debug_level, iser_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
@@ -370,8 +371,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
-					conn, conn->dd_data, ib_conn);
+	iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n",
+		  conn, conn->dd_data, ib_conn);
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn  = ib_conn;
@@ -475,28 +476,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
 	case ISCSI_PARAM_HDRDGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_IFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "IFMarker wasn't negotiated to No");
+			iser_err("IFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_OFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "OFMarker wasn't negotiated to No");
+			iser_err("OFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
@@ -596,7 +597,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 				   ib_conn->state == ISER_CONN_DOWN))
 		rc = -1;
 
-	iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+	iser_info("ib conn %p rc = %d\n", ib_conn, rc);
 
 	if (rc > 0)
 		return 1; /* success, this is the equivalent of POLLOUT */
@@ -623,7 +624,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 
 	iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
 
-	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+	iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
 }
@@ -682,7 +683,7 @@ static umode_t iser_attr_is_visible(int param_type, int param)
 
 static struct scsi_host_template iscsi_iser_sht = {
 	.module                 = THIS_MODULE,
-	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
+	.name                   = "iSCSI Initiator over iSER",
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
@@ -740,7 +741,7 @@ static int __init iser_init(void)
 	iser_dbg("Starting iSER datamover...\n");
 
 	if (iscsi_max_lun < 1) {
-		printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+		iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
 		return -EINVAL;
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5babdb35bda7..4f069c0d4c04 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -8,6 +8,7 @@
 *
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -42,6 +43,7 @@
 
 #include <linux/types.h>
 #include <linux/net.h>
+#include <linux/printk.h>
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
@@ -65,20 +67,26 @@
 
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"0.1"
-#define DRV_DATE	"May 7th, 2006"
+#define DRV_VER		"1.1"
 
 #define iser_dbg(fmt, arg...)				\
 	do {						\
-		if (iser_debug_level > 1)		\
+		if (iser_debug_level > 2)		\
 			printk(KERN_DEBUG PFX "%s:" fmt,\
 				__func__ , ## arg);	\
 	} while (0)
 
 #define iser_warn(fmt, arg...)				\
 	do {						\
+		if (iser_debug_level > 1)		\
+			pr_warn(PFX "%s:" fmt,		\
+				__func__ , ## arg);	\
+	} while (0)
+
+#define iser_info(fmt, arg...)				\
+	do {						\
 		if (iser_debug_level > 0)		\
-			printk(KERN_DEBUG PFX "%s:" fmt,\
+			pr_info(PFX "%s:" fmt,		\
 				__func__ , ## arg);	\
 	} while (0)
@@ -133,6 +141,15 @@ struct iser_hdr {
 	__be64  read_va;
 } __attribute__((packed));
 
+
+#define ISER_ZBVA_NOT_SUPPORTED		0x80
+#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40
+
+struct iser_cm_hdr {
+	u8      flags;
+	u8      rsvd[3];
+} __packed;
+
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index a00ccd1ca333..b6d81a86c976 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -1,5 +1,6 @@
 /*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index be1edb04b085..7827baf455a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -1,5 +1,6 @@
 /*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -416,8 +417,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 			for (i=0 ; i<ib_conn->page_vec->length ; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
 					 (unsigned long long) ib_conn->page_vec->pages[i]);
-			return err;
 		}
+		if (err)
+			return err;
 	}
 	return 0;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4debadc53106..2c4941d0656b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1,6 +1,7 @@
 /*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
@@ -74,8 +75,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	struct iser_cq_desc *cq_desc;
 
 	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
-	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
-		 device->ib_device->name, device->ib_device->num_comp_vectors);
+	iser_info("using %d CQs, device %s supports %d vectors\n",
+		  device->cqs_used, device->ib_device->name,
+		  device->ib_device->num_comp_vectors);
 
 	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
 				  GFP_KERNEL);
@@ -262,7 +264,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 			min_index = index;
 	device->cq_active_qps[min_index]++;
 	mutex_unlock(&ig.connlist_mutex);
-	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
@@ -280,9 +282,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
+	iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->cma_id->qp);
 	return ret;
 
 out_err:
@@ -291,17 +293,17 @@ out_err:
 }
 
 /**
- * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
+ * releases the FMR pool and QP objects, returns 0 on success,
 * -1 on failure
 */
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 {
 	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
-	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->qp);
+	iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->qp);
 
 	/* qp is created only once both addr & route are resolved */
 	if (ib_conn->fmr_pool != NULL)
@@ -313,13 +315,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 		rdma_destroy_qp(ib_conn->cma_id);
 	}
 
-	/* if cma handler context, the caller acts s.t the cma destroy the id */
-	if (ib_conn->cma_id != NULL && can_destroy_id)
-		rdma_destroy_id(ib_conn->cma_id);
-
 	ib_conn->fmr_pool = NULL;
 	ib_conn->qp	  = NULL;
-	ib_conn->cma_id   = NULL;
 	kfree(ib_conn->page_vec);
 
 	if (ib_conn->login_buf) {
@@ -379,7 +377,7 @@ static void iser_device_try_release(struct iser_device *device)
 {
 	mutex_lock(&ig.device_list_mutex);
 	device->refcount--;
-	iser_err("device %p refcount %d\n",device,device->refcount);
+	iser_info("device %p refcount %d\n", device, device->refcount);
 	if (!device->refcount) {
 		iser_free_device_ib_res(device);
 		list_del(&device->ig_list);
@@ -414,11 +412,16 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
 	list_del(&ib_conn->conn_list);
 	mutex_unlock(&ig.connlist_mutex);
 	iser_free_rx_descriptors(ib_conn);
-	iser_free_ib_conn_res(ib_conn, can_destroy_id);
+	iser_free_ib_conn_res(ib_conn);
 	ib_conn->device = NULL;
 	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
 	if (device != NULL)
 		iser_device_try_release(device);
+	/* if cma handler context, the caller actually destroy the id */
+	if (ib_conn->cma_id != NULL && can_destroy_id) {
+		rdma_destroy_id(ib_conn->cma_id);
+		ib_conn->cma_id = NULL;
+	}
 	iscsi_destroy_endpoint(ib_conn->ep);
 }
@@ -498,6 +501,7 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
 	int ret;
+	struct iser_cm_hdr req_hdr;
 	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
 	if (ret)
@@ -509,6 +513,12 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 	conn_param.retry_count = 7;
 	conn_param.rnr_retry_count = 6;
+	memset(&req_hdr, 0, sizeof(req_hdr));
+	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
+			 ISER_SEND_W_INV_NOT_SUPPORTED);
+	conn_param.private_data = (void *)&req_hdr;
+	conn_param.private_data_len = sizeof(struct iser_cm_hdr);
+
 	ret = rdma_connect(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
@@ -558,8 +568,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 {
 	int ret = 0;
-	iser_err("event %d status %d conn %p id %p\n",
-		 event->event, event->status, cma_id->context, cma_id);
+	iser_info("event %d status %d conn %p id %p\n",
+		  event->event, event->status, cma_id->context, cma_id);
 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -619,8 +629,8 @@ int iser_connect(struct iser_conn *ib_conn,
 	/* the device is known only --after-- address resolution */
 	ib_conn->device = NULL;
-	iser_err("connecting to: %pI4, port 0x%x\n",
-		 &dst_addr->sin_addr, dst_addr->sin_port);
+	iser_info("connecting to: %pI4, port 0x%x\n",
+		  &dst_addr->sin_addr, dst_addr->sin_port);
 	ib_conn->state = ISER_CONN_PENDING;
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
new file mode 100644
index 000000000000..ce3fd32167dc
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -0,0 +1,5 @@
+config INFINIBAND_ISERT
+	tristate "iSCSI Extensions for RDMA (iSER) target support"
+	depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
+	---help---
+	Support for iSCSI Extensions for RDMA (iSER) Target on InfiniBand fabrics.
diff --git a/drivers/infiniband/ulp/isert/Makefile b/drivers/infiniband/ulp/isert/Makefile
new file mode 100644
index 000000000000..c8bf2421f5bc
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Makefile
@@ -0,0 +1,2 @@
+ccflags-y := -Idrivers/target -Idrivers/target/iscsi
+obj-$(CONFIG_INFINIBAND_ISERT) += ib_isert.o
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
new file mode 100644
index 000000000000..41712f096515
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -0,0 +1,2281 @@
+/*******************************************************************************
+ * This file contains iSCSI extensions for RDMA (iSER) Verbs
+ *
+ * (c) Copyright 2013 RisingTide Systems LLC.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/ + +#include <linux/string.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <rdma/ib_verbs.h> +#include <rdma/rdma_cm.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/iscsi/iscsi_transport.h> + +#include "isert_proto.h" +#include "ib_isert.h" + +#define ISERT_MAX_CONN 8 +#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) +#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) + +static DEFINE_MUTEX(device_list_mutex); +static LIST_HEAD(device_list); +static struct workqueue_struct *isert_rx_wq; +static struct workqueue_struct *isert_comp_wq; +static struct kmem_cache *isert_cmd_cache; + +static void +isert_qp_event_callback(struct ib_event *e, void *context) +{ + struct isert_conn *isert_conn = (struct isert_conn *)context; + + pr_err("isert_qp_event_callback event: %d\n", e->event); + switch (e->event) { + case IB_EVENT_COMM_EST: + rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); + break; + case IB_EVENT_QP_LAST_WQE_REACHED: + pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n"); + break; + default: + break; + } +} + +static int +isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) +{ + int ret; + + ret = ib_query_device(ib_dev, devattr); + if (ret) { + pr_err("ib_query_device() failed: %d\n", ret); + return ret; + } + pr_debug("devattr->max_sge: %d\n", devattr->max_sge); + pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd); + + return 0; +} + +static int +isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) +{ + struct isert_device *device = isert_conn->conn_device; + struct ib_qp_init_attr attr; + struct ib_device_attr devattr; + int ret, index, min_index = 0; + + memset(&devattr, 0, sizeof(struct ib_device_attr)); + ret = isert_query_device(cma_id->device, &devattr); + if (ret) + return ret; + + mutex_lock(&device_list_mutex); + for (index = 0; index < device->cqs_used; index++) + if (device->cq_active_qps[index] < + device->cq_active_qps[min_index]) + min_index = index; + device->cq_active_qps[min_index]++; + pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index); + mutex_unlock(&device_list_mutex); + + memset(&attr, 0, sizeof(struct ib_qp_init_attr)); + attr.event_handler = isert_qp_event_callback; + attr.qp_context = isert_conn; + attr.send_cq = device->dev_tx_cq[min_index]; + attr.recv_cq = device->dev_rx_cq[min_index]; + attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; + attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; + /* + * FIXME: Use devattr.max_sge - 2 for max_send_sge as + * work-around for RDMA_READ.. 
+ */ + attr.cap.max_send_sge = devattr.max_sge - 2; + isert_conn->max_sge = attr.cap.max_send_sge; + + attr.cap.max_recv_sge = 1; + attr.sq_sig_type = IB_SIGNAL_REQ_WR; + attr.qp_type = IB_QPT_RC; + + pr_debug("isert_conn_setup_qp cma_id->device: %p\n", + cma_id->device); + pr_debug("isert_conn_setup_qp conn_pd->device: %p\n", + isert_conn->conn_pd->device); + + ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); + if (ret) { + pr_err("rdma_create_qp failed for cma_id %d\n", ret); + return ret; + } + isert_conn->conn_qp = cma_id->qp; + pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); + + return 0; +} + +static void +isert_cq_event_callback(struct ib_event *e, void *context) +{ + pr_debug("isert_cq_event_callback event: %d\n", e->event); +} + +static int +isert_alloc_rx_descriptors(struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct iser_rx_desc *rx_desc; + struct ib_sge *rx_sg; + u64 dma_addr; + int i, j; + + isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS * + sizeof(struct iser_rx_desc), GFP_KERNEL); + if (!isert_conn->conn_rx_descs) + goto fail; + + rx_desc = isert_conn->conn_rx_descs; + + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { + dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ib_dev, dma_addr)) + goto dma_map_fail; + + rx_desc->dma_addr = dma_addr; + + rx_sg = &rx_desc->rx_sg; + rx_sg->addr = rx_desc->dma_addr; + rx_sg->length = ISER_RX_PAYLOAD_SIZE; + rx_sg->lkey = isert_conn->conn_mr->lkey; + } + + isert_conn->conn_rx_desc_head = 0; + return 0; + +dma_map_fail: + rx_desc = isert_conn->conn_rx_descs; + for (j = 0; j < i; j++, rx_desc++) { + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + } + kfree(isert_conn->conn_rx_descs); + isert_conn->conn_rx_descs = NULL; +fail: + return -ENOMEM; +} + +static void +isert_free_rx_descriptors(struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct iser_rx_desc *rx_desc; + int i; + + if (!isert_conn->conn_rx_descs) + return; + + rx_desc = isert_conn->conn_rx_descs; + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + } + + kfree(isert_conn->conn_rx_descs); + isert_conn->conn_rx_descs = NULL; +} + +static void isert_cq_tx_callback(struct ib_cq *, void *); +static void isert_cq_rx_callback(struct ib_cq *, void *); + +static int +isert_create_device_ib_res(struct isert_device *device) +{ + struct ib_device *ib_dev = device->ib_device; + struct isert_cq_desc *cq_desc; + int ret = 0, i, j; + + device->cqs_used = min_t(int, num_online_cpus(), + device->ib_device->num_comp_vectors); + device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); + pr_debug("Using %d CQs, device %s supports %d vectors\n", + device->cqs_used, device->ib_device->name, + device->ib_device->num_comp_vectors); + device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * + device->cqs_used, GFP_KERNEL); + if (!device->cq_desc) { + pr_err("Unable to allocate device->cq_desc\n"); + return -ENOMEM; + } + cq_desc = device->cq_desc; + + device->dev_pd = ib_alloc_pd(ib_dev); + if (IS_ERR(device->dev_pd)) { + ret = PTR_ERR(device->dev_pd); + pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret); + goto out_cq_desc; + } + + for (i = 0; i < device->cqs_used; i++) { + cq_desc[i].device = device; + cq_desc[i].cq_index = i; + + 
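+		/*
+		 * One RX/TX CQ pair is created per completion vector below;
+		 * each is sized to absorb ISERT_MAX_CONN connections worth
+		 * of DTOs, and cq_desc[i] rides along as the ib_create_cq()
+		 * context so the callbacks can recover device and index.
+		 */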
device->dev_rx_cq[i] = ib_create_cq(device->ib_device, + isert_cq_rx_callback, + isert_cq_event_callback, + (void *)&cq_desc[i], + ISER_MAX_RX_CQ_LEN, i); + if (IS_ERR(device->dev_rx_cq[i])) + goto out_cq; + + device->dev_tx_cq[i] = ib_create_cq(device->ib_device, + isert_cq_tx_callback, + isert_cq_event_callback, + (void *)&cq_desc[i], + ISER_MAX_TX_CQ_LEN, i); + if (IS_ERR(device->dev_tx_cq[i])) + goto out_cq; + + if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) + goto out_cq; + + if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) + goto out_cq; + } + + device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(device->dev_mr)) { + ret = PTR_ERR(device->dev_mr); + pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret); + goto out_cq; + } + + return 0; + +out_cq: + for (j = 0; j < i; j++) { + cq_desc = &device->cq_desc[j]; + + if (device->dev_rx_cq[j]) { + cancel_work_sync(&cq_desc->cq_rx_work); + ib_destroy_cq(device->dev_rx_cq[j]); + } + if (device->dev_tx_cq[j]) { + cancel_work_sync(&cq_desc->cq_tx_work); + ib_destroy_cq(device->dev_tx_cq[j]); + } + } + ib_dealloc_pd(device->dev_pd); + +out_cq_desc: + kfree(device->cq_desc); + + return ret; +} + +static void +isert_free_device_ib_res(struct isert_device *device) +{ + struct isert_cq_desc *cq_desc; + int i; + + for (i = 0; i < device->cqs_used; i++) { + cq_desc = &device->cq_desc[i]; + + cancel_work_sync(&cq_desc->cq_rx_work); + cancel_work_sync(&cq_desc->cq_tx_work); + ib_destroy_cq(device->dev_rx_cq[i]); + ib_destroy_cq(device->dev_tx_cq[i]); + device->dev_rx_cq[i] = NULL; + device->dev_tx_cq[i] = NULL; + } + + ib_dereg_mr(device->dev_mr); + ib_dealloc_pd(device->dev_pd); + kfree(device->cq_desc); +} + +static void +isert_device_try_release(struct isert_device *device) +{ + mutex_lock(&device_list_mutex); + device->refcount--; + if (!device->refcount) { + isert_free_device_ib_res(device); + list_del(&device->dev_node); + kfree(device); + } + mutex_unlock(&device_list_mutex); +} + +static struct isert_device * +isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) +{ + struct isert_device *device; + int ret; + + mutex_lock(&device_list_mutex); + list_for_each_entry(device, &device_list, dev_node) { + if (device->ib_device->node_guid == cma_id->device->node_guid) { + device->refcount++; + mutex_unlock(&device_list_mutex); + return device; + } + } + + device = kzalloc(sizeof(struct isert_device), GFP_KERNEL); + if (!device) { + mutex_unlock(&device_list_mutex); + return ERR_PTR(-ENOMEM); + } + + INIT_LIST_HEAD(&device->dev_node); + + device->ib_device = cma_id->device; + ret = isert_create_device_ib_res(device); + if (ret) { + kfree(device); + mutex_unlock(&device_list_mutex); + return ERR_PTR(ret); + } + + device->refcount++; + list_add_tail(&device->dev_node, &device_list); + mutex_unlock(&device_list_mutex); + + return device; +} + +static int +isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) +{ + struct iscsi_np *np = cma_id->context; + struct isert_np *isert_np = np->np_context; + struct isert_conn *isert_conn; + struct isert_device *device; + struct ib_device *ib_dev = cma_id->device; + int ret = 0; + + pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", + cma_id, cma_id->context); + + isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); + if (!isert_conn) { + pr_err("Unable to allocate isert_conn\n"); + return -ENOMEM; + } + isert_conn->state = ISER_CONN_INIT; + INIT_LIST_HEAD(&isert_conn->conn_accept_node); + 
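+	/*
+	 * conn_login_comp is completed by isert_rx_login_req() when the
+	 * first login PDU arrives, unblocking isert_get_login_rx() in the
+	 * login thread.
+	 */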
+	init_completion(&isert_conn->conn_login_comp);
+	init_waitqueue_head(&isert_conn->conn_wait);
+	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+	kref_init(&isert_conn->conn_kref);
+	kref_get(&isert_conn->conn_kref);
+
+	cma_id->context = isert_conn;
+	isert_conn->conn_cm_id = cma_id;
+	isert_conn->responder_resources = event->param.conn.responder_resources;
+	isert_conn->initiator_depth = event->param.conn.initiator_depth;
+	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
+		 isert_conn->responder_resources, isert_conn->initiator_depth);
+
+	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+	if (!isert_conn->login_buf) {
+		pr_err("Unable to allocate isert_conn->login_buf\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	isert_conn->login_req_buf = isert_conn->login_buf;
+	isert_conn->login_rsp_buf = isert_conn->login_buf +
+				    ISCSI_DEF_MAX_RECV_SEG_LEN;
+	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+		 isert_conn->login_buf, isert_conn->login_req_buf,
+		 isert_conn->login_rsp_buf);
+
+	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
+				(void *)isert_conn->login_req_buf,
+				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
+
+	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
+	if (ret) {
+		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+		       ret);
+		isert_conn->login_req_dma = 0;
+		goto out_login_buf;
+	}
+
+	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+				(void *)isert_conn->login_rsp_buf,
+				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+
+	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
+	if (ret) {
+		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+		       ret);
+		isert_conn->login_rsp_dma = 0;
+		goto out_req_dma_map;
+	}
+
+	device = isert_device_find_by_ib_dev(cma_id);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		goto out_rsp_dma_map;
+	}
+
+	isert_conn->conn_device = device;
+	isert_conn->conn_pd = device->dev_pd;
+	isert_conn->conn_mr = device->dev_mr;
+
+	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	if (ret)
+		goto out_conn_dev;
+
+	mutex_lock(&isert_np->np_accept_mutex);
+	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+	mutex_unlock(&isert_np->np_accept_mutex);
+
+	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
+	wake_up(&isert_np->np_accept_wq);
+	return 0;
+
+out_conn_dev:
+	isert_device_try_release(device);
+out_rsp_dma_map:
+	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+out_req_dma_map:
+	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
+out_login_buf:
+	kfree(isert_conn->login_buf);
+out:
+	kfree(isert_conn);
+	return ret;
+}
+
+static void
+isert_connect_release(struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_device *device = isert_conn->conn_device;
+	int cq_index;
+
+	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+
+	if (isert_conn->conn_qp) {
+		cq_index = ((struct isert_cq_desc *)
+			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
+		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
+		isert_conn->conn_device->cq_active_qps[cq_index]--;
+
+		rdma_destroy_qp(isert_conn->conn_cm_id);
+	}
+
+	isert_free_rx_descriptors(isert_conn);
+	rdma_destroy_id(isert_conn->conn_cm_id);
+
+	if (isert_conn->login_buf) {
+		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+				    ISER_RX_LOGIN_SIZE,
DMA_TO_DEVICE); + ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, + ISCSI_DEF_MAX_RECV_SEG_LEN, + DMA_FROM_DEVICE); + kfree(isert_conn->login_buf); + } + kfree(isert_conn); + + if (device) + isert_device_try_release(device); + + pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n"); +} + +static void +isert_connected_handler(struct rdma_cm_id *cma_id) +{ + return; +} + +static void +isert_release_conn_kref(struct kref *kref) +{ + struct isert_conn *isert_conn = container_of(kref, + struct isert_conn, conn_kref); + + pr_debug("Calling isert_connect_release for final kref %s/%d\n", + current->comm, current->pid); + + isert_connect_release(isert_conn); +} + +static void +isert_put_conn(struct isert_conn *isert_conn) +{ + kref_put(&isert_conn->conn_kref, isert_release_conn_kref); +} + +static void +isert_disconnect_work(struct work_struct *work) +{ + struct isert_conn *isert_conn = container_of(work, + struct isert_conn, conn_logout_work); + + pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); + + isert_conn->state = ISER_CONN_DOWN; + + if (isert_conn->post_recv_buf_count == 0 && + atomic_read(&isert_conn->post_send_buf_count) == 0) { + pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); + wake_up(&isert_conn->conn_wait); + } + + isert_put_conn(isert_conn); +} + +static void +isert_disconnected_handler(struct rdma_cm_id *cma_id) +{ + struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + + INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); + schedule_work(&isert_conn->conn_logout_work); +} + +static int +isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) +{ + int ret = 0; + + pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", + event->event, event->status, cma_id->context, cma_id); + + switch (event->event) { + case RDMA_CM_EVENT_CONNECT_REQUEST: + pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n"); + ret = isert_connect_request(cma_id, event); + break; + case RDMA_CM_EVENT_ESTABLISHED: + pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n"); + isert_connected_handler(cma_id); + break; + case RDMA_CM_EVENT_DISCONNECTED: + pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n"); + isert_disconnected_handler(cma_id); + break; + case RDMA_CM_EVENT_DEVICE_REMOVAL: + case RDMA_CM_EVENT_ADDR_CHANGE: + break; + case RDMA_CM_EVENT_CONNECT_ERROR: + default: + pr_err("Unknown RDMA CMA event: %d\n", event->event); + break; + } + + if (ret != 0) { + pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", + event->event, ret); + dump_stack(); + } + + return ret; +} + +static int +isert_post_recv(struct isert_conn *isert_conn, u32 count) +{ + struct ib_recv_wr *rx_wr, *rx_wr_failed; + int i, ret; + unsigned int rx_head = isert_conn->conn_rx_desc_head; + struct iser_rx_desc *rx_desc; + + for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) { + rx_desc = &isert_conn->conn_rx_descs[rx_head]; + rx_wr->wr_id = (unsigned long)rx_desc; + rx_wr->sg_list = &rx_desc->rx_sg; + rx_wr->num_sge = 1; + rx_wr->next = rx_wr + 1; + rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1); + } + + rx_wr--; + rx_wr->next = NULL; /* mark end of work requests list */ + + isert_conn->post_recv_buf_count += count; + ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, + &rx_wr_failed); + if (ret) { + pr_err("ib_post_recv() failed with ret: %d\n", ret); + isert_conn->post_recv_buf_count -= count; + } else { + pr_debug("isert_post_recv(): Posted %d RX buffers\n", count); + 
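+		/*
+		 * Commit the new ring head only on successful post; on
+		 * failure conn_rx_desc_head is left unchanged so the same
+		 * descriptors can be posted again.
+		 */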
isert_conn->conn_rx_desc_head = rx_head; + } + return ret; +} + +static int +isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct ib_send_wr send_wr, *send_wr_failed; + int ret; + + ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + + send_wr.next = NULL; + send_wr.wr_id = (unsigned long)tx_desc; + send_wr.sg_list = tx_desc->tx_sg; + send_wr.num_sge = tx_desc->num_sge; + send_wr.opcode = IB_WR_SEND; + send_wr.send_flags = IB_SEND_SIGNALED; + + atomic_inc(&isert_conn->post_send_buf_count); + + ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); + if (ret) { + pr_err("ib_post_send() failed, ret: %d\n", ret); + atomic_dec(&isert_conn->post_send_buf_count); + } + + return ret; +} + +static void +isert_create_send_desc(struct isert_conn *isert_conn, + struct isert_cmd *isert_cmd, + struct iser_tx_desc *tx_desc) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + + ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + + memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); + tx_desc->iser_header.flags = ISER_VER; + + tx_desc->num_sge = 1; + tx_desc->isert_cmd = isert_cmd; + + if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { + tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; + pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc); + } +} + +static int +isert_init_tx_hdrs(struct isert_conn *isert_conn, + struct iser_tx_desc *tx_desc) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + u64 dma_addr; + + dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + if (ib_dma_mapping_error(ib_dev, dma_addr)) { + pr_err("ib_dma_mapping_error() failed\n"); + return -ENOMEM; + } + + tx_desc->dma_addr = dma_addr; + tx_desc->tx_sg[0].addr = tx_desc->dma_addr; + tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; + tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; + + pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" + " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, + tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); + + return 0; +} + +static void +isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr) +{ + isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; + send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; + send_wr->opcode = IB_WR_SEND; + send_wr->send_flags = IB_SEND_SIGNALED; + send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0]; + send_wr->num_sge = isert_cmd->tx_desc.num_sge; +} + +static int +isert_rdma_post_recvl(struct isert_conn *isert_conn) +{ + struct ib_recv_wr rx_wr, *rx_wr_fail; + struct ib_sge sge; + int ret; + + memset(&sge, 0, sizeof(struct ib_sge)); + sge.addr = isert_conn->login_req_dma; + sge.length = ISER_RX_LOGIN_SIZE; + sge.lkey = isert_conn->conn_mr->lkey; + + pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n", + sge.addr, sge.length, sge.lkey); + + memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); + rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf; + rx_wr.sg_list = &sge; + rx_wr.num_sge = 1; + + isert_conn->post_recv_buf_count++; + ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); + if (ret) { + pr_err("ib_post_recv() failed: %d\n", ret); + isert_conn->post_recv_buf_count--; + } + + pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); + return ret; +} + +static int +isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, + u32 
length) +{ + struct isert_conn *isert_conn = conn->context; + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc; + int ret; + + isert_create_send_desc(isert_conn, NULL, tx_desc); + + memcpy(&tx_desc->iscsi_header, &login->rsp[0], + sizeof(struct iscsi_hdr)); + + isert_init_tx_hdrs(isert_conn, tx_desc); + + if (length > 0) { + struct ib_sge *tx_dsg = &tx_desc->tx_sg[1]; + + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma, + length, DMA_TO_DEVICE); + + memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length); + + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma, + length, DMA_TO_DEVICE); + + tx_dsg->addr = isert_conn->login_rsp_dma; + tx_dsg->length = length; + tx_dsg->lkey = isert_conn->conn_mr->lkey; + tx_desc->num_sge = 2; + } + if (!login->login_failed) { + if (login->login_complete) { + ret = isert_alloc_rx_descriptors(isert_conn); + if (ret) + return ret; + + ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); + if (ret) + return ret; + + isert_conn->state = ISER_CONN_UP; + goto post_send; + } + + ret = isert_rdma_post_recvl(isert_conn); + if (ret) + return ret; + } +post_send: + ret = isert_post_send(isert_conn, tx_desc); + if (ret) + return ret; + + return 0; +} + +static void +isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, + struct isert_conn *isert_conn) +{ + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_login *login = conn->conn_login; + int size; + + if (!login) { + pr_err("conn->conn_login is NULL\n"); + dump_stack(); + return; + } + + if (login->first_request) { + struct iscsi_login_req *login_req = + (struct iscsi_login_req *)&rx_desc->iscsi_header; + /* + * Setup the initial iscsi_login values from the leading + * login request PDU. + */ + login->leading_connection = (!login_req->tsih) ? 
1 : 0; + login->current_stage = + (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) + >> 2; + login->version_min = login_req->min_version; + login->version_max = login_req->max_version; + memcpy(login->isid, login_req->isid, 6); + login->cmd_sn = be32_to_cpu(login_req->cmdsn); + login->init_task_tag = login_req->itt; + login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn); + login->cid = be16_to_cpu(login_req->cid); + login->tsih = be16_to_cpu(login_req->tsih); + } + + memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); + + size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); + pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", + size, rx_buflen, MAX_KEY_VALUE_PAIRS); + memcpy(login->req_buf, &rx_desc->data[0], size); + + complete(&isert_conn->conn_login_comp); +} + +static void +isert_release_cmd(struct iscsi_cmd *cmd) +{ + struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd, + iscsi_cmd); + + pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd); + + kfree(cmd->buf_ptr); + kfree(cmd->tmr_req); + + kmem_cache_free(isert_cmd_cache, isert_cmd); +} + +static struct iscsi_cmd +*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp) +{ + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + struct isert_cmd *isert_cmd; + + isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp); + if (!isert_cmd) { + pr_err("Unable to allocate isert_cmd\n"); + return NULL; + } + isert_cmd->conn = isert_conn; + isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd; + + return &isert_cmd->iscsi_cmd; +} + +static int +isert_handle_scsi_cmd(struct isert_conn *isert_conn, + struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc, + unsigned char *buf) +{ + struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; + struct scatterlist *sg; + int imm_data, imm_data_len, unsol_data, sg_nents, rc; + bool dump_payload = false; + + rc = iscsit_setup_scsi_cmd(conn, cmd, buf); + if (rc < 0) + return rc; + + imm_data = cmd->immediate_data; + imm_data_len = cmd->first_burst_len; + unsol_data = cmd->unsolicited_data; + + rc = iscsit_process_scsi_cmd(conn, cmd, hdr); + if (rc < 0) { + return 0; + } else if (rc > 0) { + dump_payload = true; + goto sequence_cmd; + } + + if (!imm_data) + return 0; + + sg = &cmd->se_cmd.t_data_sg[0]; + sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); + + pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", + sg, sg_nents, &rx_desc->data[0], imm_data_len); + + sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); + + cmd->write_data_done += imm_data_len; + + if (cmd->write_data_done == cmd->se_cmd.data_length) { + spin_lock_bh(&cmd->istate_lock); + cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; + cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; + spin_unlock_bh(&cmd->istate_lock); + } + +sequence_cmd: + rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); + + if (!rc && dump_payload == false && unsol_data) + iscsit_set_unsoliticed_dataout(cmd); + + if (rc == CMDSN_ERROR_CANNOT_RECOVER) + return iscsit_add_reject_from_cmd( + ISCSI_REASON_PROTOCOL_ERROR, + 1, 0, (unsigned char *)hdr, cmd); + + return 0; +} + +static int +isert_handle_iscsi_dataout(struct isert_conn *isert_conn, + struct iser_rx_desc *rx_desc, unsigned char *buf) +{ + struct scatterlist *sg_start; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_cmd *cmd = NULL; + struct iscsi_data *hdr = (struct 
iscsi_data *)buf; + u32 unsol_data_len = ntoh24(hdr->dlength); + int rc, sg_nents, sg_off, page_off; + + rc = iscsit_check_dataout_hdr(conn, buf, &cmd); + if (rc < 0) + return rc; + else if (!cmd) + return 0; + /* + * FIXME: Unexpected unsolicited_data out + */ + if (!cmd->unsolicited_data) { + pr_err("Received unexpected solicited data payload\n"); + dump_stack(); + return -1; + } + + pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", + unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); + + sg_off = cmd->write_data_done / PAGE_SIZE; + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; + sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE)); + page_off = cmd->write_data_done % PAGE_SIZE; + /* + * FIXME: Non page-aligned unsolicited_data out + */ + if (page_off) { + pr_err("Received unexpected non-page aligned data payload\n"); + dump_stack(); + return -1; + } + pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", + sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); + + sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], + unsol_data_len); + + rc = iscsit_check_dataout_payload(cmd, hdr, false); + if (rc < 0) + return rc; + + return 0; +} + +static int +isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, + uint32_t read_stag, uint64_t read_va, + uint32_t write_stag, uint64_t write_va) +{ + struct iscsi_hdr *hdr = &rx_desc->iscsi_header; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_cmd *cmd; + struct isert_cmd *isert_cmd; + int ret = -EINVAL; + u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK); + + switch (opcode) { + case ISCSI_OP_SCSI_CMD: + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + if (!cmd) + break; + + isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd); + isert_cmd->read_stag = read_stag; + isert_cmd->read_va = read_va; + isert_cmd->write_stag = write_stag; + isert_cmd->write_va = write_va; + + ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, + rx_desc, (unsigned char *)hdr); + break; + case ISCSI_OP_NOOP_OUT: + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + if (!cmd) + break; + + ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr); + break; + case ISCSI_OP_SCSI_DATA_OUT: + ret = isert_handle_iscsi_dataout(isert_conn, rx_desc, + (unsigned char *)hdr); + break; + case ISCSI_OP_SCSI_TMFUNC: + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + if (!cmd) + break; + + ret = iscsit_handle_task_mgt_cmd(conn, cmd, + (unsigned char *)hdr); + break; + case ISCSI_OP_LOGOUT: + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + if (!cmd) + break; + + ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); + if (ret > 0) + wait_for_completion_timeout(&conn->conn_logout_comp, + SECONDS_FOR_LOGOUT_COMP * + HZ); + break; + default: + pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); + dump_stack(); + break; + } + + return ret; +} + +static void +isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) +{ + struct iser_hdr *iser_hdr = &rx_desc->iser_header; + uint64_t read_va = 0, write_va = 0; + uint32_t read_stag = 0, write_stag = 0; + int rc; + + switch (iser_hdr->flags & 0xF0) { + case ISCSI_CTRL: + if (iser_hdr->flags & ISER_RSV) { + read_stag = be32_to_cpu(iser_hdr->read_stag); + read_va = be64_to_cpu(iser_hdr->read_va); + pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", + read_stag, (unsigned long long)read_va); + } + if (iser_hdr->flags & ISER_WSV) { + write_stag = be32_to_cpu(iser_hdr->write_stag); + 
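+			/*
+			 * Like the read_stag/read_va pair above, these are
+			 * byte-swapped now and stashed in the isert_cmd;
+			 * isert_get_dataout() later uses write_va/write_stag
+			 * as the remote address/rkey for IB_WR_RDMA_READ.
+			 */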
+			write_va = be64_to_cpu(iser_hdr->write_va);
+			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
+				 write_stag, (unsigned long long)write_va);
+		}
+
+		pr_debug("ISER ISCSI_CTRL PDU\n");
+		break;
+	case ISER_HELLO:
+		pr_err("iSER Hello message\n");
+		break;
+	default:
+		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+		break;
+	}
+
+	rc = isert_rx_opcode(isert_conn, rx_desc,
+			     read_stag, read_va, write_stag, write_va);
+}
+
+static void
+isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
+		    unsigned long xfer_len)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iscsi_hdr *hdr;
+	u64 rx_dma;
+	int rx_buflen, outstanding;
+
+	if ((char *)desc == isert_conn->login_req_buf) {
+		rx_dma = isert_conn->login_req_dma;
+		rx_buflen = ISER_RX_LOGIN_SIZE;
+		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+			 rx_dma, rx_buflen);
+	} else {
+		rx_dma = desc->dma_addr;
+		rx_buflen = ISER_RX_PAYLOAD_SIZE;
+		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+			 rx_dma, rx_buflen);
+	}
+
+	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
+
+	hdr = &desc->iscsi_header;
+	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+		 hdr->opcode, hdr->itt, hdr->flags,
+		 (int)(xfer_len - ISER_HEADERS_LEN));
+
+	if ((char *)desc == isert_conn->login_req_buf)
+		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
+				   isert_conn);
+	else
+		isert_rx_do_work(desc, isert_conn);
+
+	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+				      DMA_FROM_DEVICE);
+
+	isert_conn->post_recv_buf_count--;
+	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
+		 isert_conn->post_recv_buf_count);
+
+	if ((char *)desc == isert_conn->login_req_buf)
+		return;
+
+	outstanding = isert_conn->post_recv_buf_count;
+	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
+		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
+				     ISERT_MIN_POSTED_RX);
+		err = isert_post_recv(isert_conn, count);
+		if (err) {
+			pr_err("isert_post_recv() count: %d failed, %d\n",
+			       count, err);
+		}
+	}
+}
+
+static void
+isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
+
+	if (wr->sge) {
+		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
+		wr->sge = NULL;
+	}
+
+	kfree(wr->send_wr);
+	wr->send_wr = NULL;
+
+	kfree(isert_cmd->ib_sge);
+	isert_cmd->ib_sge = NULL;
+}
+
+static void
+isert_put_cmd(struct isert_cmd *isert_cmd)
+{
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct iscsi_conn *conn;
+
+	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
+
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_SCSI_CMD:
+		conn = isert_conn->conn;
+
+		spin_lock_bh(&conn->cmd_lock);
+		if (!list_empty(&cmd->i_conn_node))
+			list_del(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		if (cmd->data_direction == DMA_TO_DEVICE)
+			iscsit_stop_dataout_timer(cmd);
+
+		isert_unmap_cmd(isert_cmd, isert_conn);
+		/*
+		 * Fall-through
+		 */
+	case ISCSI_OP_SCSI_TMFUNC:
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
+		break;
+	case ISCSI_OP_REJECT:
+	case ISCSI_OP_NOOP_OUT:
+		conn = isert_conn->conn;
+
+		spin_lock_bh(&conn->cmd_lock);
+		if (!list_empty(&cmd->i_conn_node))
+			list_del(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		/*
+		 * Handle special case for REJECT when iscsi_add_reject*() has
+		 * overwritten the original iscsi_opcode assignment, and the
+		 * associated cmd->se_cmd needs to be released.
+		 */
+		if (cmd->se_cmd.se_tfo != NULL) {
+			transport_generic_free_cmd(&cmd->se_cmd, 0);
+			break;
+		}
+		/*
+		 * Fall-through
+		 */
+	default:
+		isert_release_cmd(cmd);
+		break;
+	}
+}
+
+static void
+isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
+{
+	if (tx_desc->dma_addr != 0) {
+		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
+		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
+				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
+		tx_desc->dma_addr = 0;
+	}
+}
+
+static void
+isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
+		     struct ib_device *ib_dev)
+{
+	if (isert_cmd->sense_buf_dma != 0) {
+		pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
+		ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
+				    isert_cmd->sense_buf_len, DMA_TO_DEVICE);
+		isert_cmd->sense_buf_dma = 0;
+	}
+
+	isert_unmap_tx_desc(tx_desc, ib_dev);
+	isert_put_cmd(isert_cmd);
+}
+
+static void
+isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+			   struct isert_cmd *isert_cmd)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;
+
+	iscsit_stop_dataout_timer(cmd);
+
+	if (wr->sge) {
+		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
+		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
+		wr->sge = NULL;
+	}
+
+	if (isert_cmd->ib_sge) {
+		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
+		kfree(isert_cmd->ib_sge);
+		isert_cmd->ib_sge = NULL;
+	}
+
+	cmd->write_data_done = se_cmd->data_length;
+
+	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
+	spin_lock_bh(&cmd->istate_lock);
+	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+	spin_unlock_bh(&cmd->istate_lock);
+
+	target_execute_cmd(se_cmd);
+}
+
+static void
+isert_do_control_comp(struct work_struct *work)
+{
+	struct isert_cmd *isert_cmd = container_of(work,
+			struct isert_cmd, comp_work);
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+
+	switch (cmd->i_state) {
+	case ISTATE_SEND_TASKMGTRSP:
+		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
+
+		atomic_dec(&isert_conn->post_send_buf_count);
+		iscsit_tmr_post_handler(cmd, cmd->conn);
+
+		cmd->i_state = ISTATE_SENT_STATUS;
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		break;
+	case ISTATE_SEND_REJECT:
+		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
+		atomic_dec(&isert_conn->post_send_buf_count);
+
+		cmd->i_state = ISTATE_SENT_STATUS;
+		complete(&cmd->reject_comp);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		break;
+	case ISTATE_SEND_LOGOUTRSP:
+		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+		/*
+		 * Call atomic_dec(&isert_conn->post_send_buf_count)
+		 * from isert_free_conn()
+		 */
+		isert_conn->logout_posted = true;
+		iscsit_logout_post_handler(cmd, cmd->conn);
+		break;
+	default:
+		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+		dump_stack();
+		break;
+	}
+}
+
+static void
+isert_response_completion(struct iser_tx_desc *tx_desc,
+			  struct isert_cmd *isert_cmd,
+			  struct isert_conn *isert_conn,
+			  struct
ib_device *ib_dev) +{ + struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd; + + if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || + cmd->i_state == ISTATE_SEND_LOGOUTRSP) { + isert_unmap_tx_desc(tx_desc, ib_dev); + + INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp); + queue_work(isert_comp_wq, &isert_cmd->comp_work); + return; + } + atomic_dec(&isert_conn->post_send_buf_count); + + cmd->i_state = ISTATE_SENT_STATUS; + isert_completion_put(tx_desc, isert_cmd, ib_dev); +} + +static void +isert_send_completion(struct iser_tx_desc *tx_desc, + struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + struct isert_rdma_wr *wr; + + if (!isert_cmd) { + atomic_dec(&isert_conn->post_send_buf_count); + isert_unmap_tx_desc(tx_desc, ib_dev); + return; + } + wr = &isert_cmd->rdma_wr; + + switch (wr->iser_ib_op) { + case ISER_IB_RECV: + pr_err("isert_send_completion: Got ISER_IB_RECV\n"); + dump_stack(); + break; + case ISER_IB_SEND: + pr_debug("isert_send_completion: Got ISER_IB_SEND\n"); + isert_response_completion(tx_desc, isert_cmd, + isert_conn, ib_dev); + break; + case ISER_IB_RDMA_WRITE: + pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); + dump_stack(); + break; + case ISER_IB_RDMA_READ: + pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); + + atomic_dec(&isert_conn->post_send_buf_count); + isert_completion_rdma_read(tx_desc, isert_cmd); + break; + default: + pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); + dump_stack(); + break; + } +} + +static void +isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + + if (tx_desc) { + struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + + if (!isert_cmd) + isert_unmap_tx_desc(tx_desc, ib_dev); + else + isert_completion_put(tx_desc, isert_cmd, ib_dev); + } + + if (isert_conn->post_recv_buf_count == 0 && + atomic_read(&isert_conn->post_send_buf_count) == 0) { + pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); + pr_debug("Calling wake_up from isert_cq_comp_err\n"); + + isert_conn->state = ISER_CONN_TERMINATING; + wake_up(&isert_conn->conn_wait_comp_err); + } +} + +static void +isert_cq_tx_work(struct work_struct *work) +{ + struct isert_cq_desc *cq_desc = container_of(work, + struct isert_cq_desc, cq_tx_work); + struct isert_device *device = cq_desc->device; + int cq_index = cq_desc->cq_index; + struct ib_cq *tx_cq = device->dev_tx_cq[cq_index]; + struct isert_conn *isert_conn; + struct iser_tx_desc *tx_desc; + struct ib_wc wc; + + while (ib_poll_cq(tx_cq, 1, &wc) == 1) { + tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id; + isert_conn = wc.qp->qp_context; + + if (wc.status == IB_WC_SUCCESS) { + isert_send_completion(tx_desc, isert_conn); + } else { + pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); + pr_debug("TX wc.status: 0x%08x\n", wc.status); + atomic_dec(&isert_conn->post_send_buf_count); + isert_cq_comp_err(tx_desc, isert_conn); + } + } + + ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP); +} + +static void +isert_cq_tx_callback(struct ib_cq *cq, void *context) +{ + struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; + + INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work); + queue_work(isert_comp_wq, &cq_desc->cq_tx_work); +} + +static void +isert_cq_rx_work(struct work_struct *work) +{ + struct isert_cq_desc *cq_desc = container_of(work, + struct isert_cq_desc, cq_rx_work); + struct isert_device *device = 
cq_desc->device; + int cq_index = cq_desc->cq_index; + struct ib_cq *rx_cq = device->dev_rx_cq[cq_index]; + struct isert_conn *isert_conn; + struct iser_rx_desc *rx_desc; + struct ib_wc wc; + unsigned long xfer_len; + + while (ib_poll_cq(rx_cq, 1, &wc) == 1) { + rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id; + isert_conn = wc.qp->qp_context; + + if (wc.status == IB_WC_SUCCESS) { + xfer_len = (unsigned long)wc.byte_len; + isert_rx_completion(rx_desc, isert_conn, xfer_len); + } else { + pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); + if (wc.status != IB_WC_WR_FLUSH_ERR) + pr_debug("RX wc.status: 0x%08x\n", wc.status); + + isert_conn->post_recv_buf_count--; + isert_cq_comp_err(NULL, isert_conn); + } + } + + ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP); +} + +static void +isert_cq_rx_callback(struct ib_cq *cq, void *context) +{ + struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; + + INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work); + queue_work(isert_rx_wq, &cq_desc->cq_rx_work); +} + +static int +isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) +{ + struct ib_send_wr *wr_failed; + int ret; + + atomic_inc(&isert_conn->post_send_buf_count); + + ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, + &wr_failed); + if (ret) { + pr_err("ib_post_send failed with %d\n", ret); + atomic_dec(&isert_conn->post_send_buf_count); + return ret; + } + return ret; +} + +static int +isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + struct isert_cmd *isert_cmd = container_of(cmd, + struct isert_cmd, iscsi_cmd); + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *) + &isert_cmd->tx_desc.iscsi_header; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + iscsit_build_rsp_pdu(cmd, conn, true, hdr); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + /* + * Attach SENSE DATA payload to iSCSI Response PDU + */ + if (cmd->se_cmd.sense_buffer && + ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || + (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1]; + u32 padding, sense_len; + + put_unaligned_be16(cmd->se_cmd.scsi_sense_length, + cmd->sense_buffer); + cmd->se_cmd.scsi_sense_length += sizeof(__be16); + + padding = -(cmd->se_cmd.scsi_sense_length) & 3; + hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); + sense_len = cmd->se_cmd.scsi_sense_length + padding; + + isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev, + (void *)cmd->sense_buffer, sense_len, + DMA_TO_DEVICE); + + isert_cmd->sense_buf_len = sense_len; + tx_dsg->addr = isert_cmd->sense_buf_dma; + tx_dsg->length = sense_len; + tx_dsg->lkey = isert_conn->conn_mr->lkey; + isert_cmd->tx_desc.num_sge = 2; + } + + isert_init_send_wr(isert_cmd, send_wr); + + pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + + return isert_post_response(isert_conn, isert_cmd); +} + +static int +isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, + bool nopout_response) +{ + struct isert_cmd *isert_cmd = container_of(cmd, + struct isert_cmd, iscsi_cmd); + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr; + + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + 
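+	/*
+	 * Same send path as isert_put_response(): init the TX descriptor,
+	 * have the iscsit helper build the PDU header in place, map the
+	 * headers, and post a signaled IB_WR_SEND.
+	 */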
+	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
+			       &isert_cmd->tx_desc.iscsi_header,
+			       nopout_response);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
+				&isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
+				  &isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
+			    &isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
+		    u32 data_left, u32 offset)
+{
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct scatterlist *sg_start, *tmp_sg;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	u32 sg_off, page_off;
+	int i = 0, sg_nents;
+
+	sg_off = offset / PAGE_SIZE;
+	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
+	page_off = offset % PAGE_SIZE;
+
+	send_wr->sg_list = ib_sge;
+	send_wr->num_sge = sg_nents;
+	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+	/*
+	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
+ */ + for_each_sg(sg_start, tmp_sg, sg_nents, i) { + pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", + (unsigned long long)tmp_sg->dma_address, + tmp_sg->length, page_off); + + ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; + ib_sge->length = min_t(u32, data_left, + ib_sg_dma_len(ib_dev, tmp_sg) - page_off); + ib_sge->lkey = isert_conn->conn_mr->lkey; + + pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n", + ib_sge->addr, ib_sge->length); + page_off = 0; + data_left -= ib_sge->length; + ib_sge++; + pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge); + } + + pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", + send_wr->sg_list, send_wr->num_sge); + + return sg_nents; +} + +static int +isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct isert_cmd *isert_cmd = container_of(cmd, + struct isert_cmd, iscsi_cmd); + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + struct ib_send_wr *wr_failed, *send_wr; + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct ib_sge *ib_sge; + struct scatterlist *sg; + u32 offset = 0, data_len, data_left, rdma_write_max; + int rc, ret = 0, count, sg_nents, i, ib_sge_cnt; + + pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length); + + sg = &se_cmd->t_data_sg[0]; + sg_nents = se_cmd->t_data_nents; + + count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE); + if (unlikely(!count)) { + pr_err("Unable to map put_datain SGs\n"); + return -EINVAL; + } + wr->sge = sg; + wr->num_sge = sg_nents; + pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n", + count, sg, sg_nents); + + ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); + if (!ib_sge) { + pr_warn("Unable to allocate datain ib_sge\n"); + ret = -ENOMEM; + goto unmap_sg; + } + isert_cmd->ib_sge = ib_sge; + + pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n", + ib_sge, se_cmd->t_data_nents); + + wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); + wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, + GFP_KERNEL); + if (!wr->send_wr) { + pr_err("Unable to allocate wr->send_wr\n"); + ret = -ENOMEM; + goto unmap_sg; + } + pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n", + wr->send_wr, wr->send_wr_num); + + iscsit_increment_maxcmdsn(cmd, conn->sess); + cmd->stat_sn = conn->stat_sn++; + + wr->isert_cmd = isert_cmd; + rdma_write_max = isert_conn->max_sge * PAGE_SIZE; + data_left = se_cmd->data_length; + + for (i = 0; i < wr->send_wr_num; i++) { + send_wr = &isert_cmd->rdma_wr.send_wr[i]; + data_len = min(data_left, rdma_write_max); + + send_wr->opcode = IB_WR_RDMA_WRITE; + send_wr->send_flags = 0; + send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; + send_wr->wr.rdma.rkey = isert_cmd->read_stag; + + ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, + send_wr, data_len, offset); + ib_sge += ib_sge_cnt; + + if (i + 1 == wr->send_wr_num) + send_wr->next = &isert_cmd->tx_desc.send_wr; + else + send_wr->next = &wr->send_wr[i + 1]; + + offset += data_len; + data_left -= data_len; + } + /* + * Build isert_conn->tx_desc for iSCSI response PDU and attach + */ + isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); + iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *) + &isert_cmd->tx_desc.iscsi_header); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + 
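+	/*
+	 * The final RDMA_WRITE WR in the chain was linked to
+	 * tx_desc.send_wr above, so the single ib_post_send() below pushes
+	 * the Data-IN payload and the SCSI response as one WR list.
+	 */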
isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr); + + atomic_inc(&isert_conn->post_send_buf_count); + + rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); + if (rc) { + pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); + atomic_dec(&isert_conn->post_send_buf_count); + } + pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n"); + return 1; + +unmap_sg: + ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE); + return ret; +} + +static int +isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct isert_cmd *isert_cmd = container_of(cmd, + struct isert_cmd, iscsi_cmd); + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + struct ib_send_wr *wr_failed, *send_wr; + struct ib_sge *ib_sge; + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct scatterlist *sg_start; + u32 sg_off, sg_nents, page_off, va_offset = 0; + u32 offset = 0, data_len, data_left, rdma_write_max; + int rc, ret = 0, count, i, ib_sge_cnt; + + pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n", + se_cmd->data_length, cmd->write_data_done); + + sg_off = cmd->write_data_done / PAGE_SIZE; + sg_start = &cmd->se_cmd.t_data_sg[sg_off]; + page_off = cmd->write_data_done % PAGE_SIZE; + + pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n", + sg_off, sg_start, page_off); + + data_left = se_cmd->data_length - cmd->write_data_done; + sg_nents = se_cmd->t_data_nents - sg_off; + + pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n", + data_left, sg_nents); + + count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE); + if (unlikely(!count)) { + pr_err("Unable to map get_dataout SGs\n"); + return -EINVAL; + } + wr->sge = sg_start; + wr->num_sge = sg_nents; + pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n", + count, sg_start, sg_nents); + + ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); + if (!ib_sge) { + pr_warn("Unable to allocate dataout ib_sge\n"); + ret = -ENOMEM; + goto unmap_sg; + } + isert_cmd->ib_sge = ib_sge; + + pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n", + ib_sge, sg_nents); + + wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); + wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, + GFP_KERNEL); + if (!wr->send_wr) { + pr_debug("Unable to allocate wr->send_wr\n"); + ret = -ENOMEM; + goto unmap_sg; + } + pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n", + wr->send_wr, wr->send_wr_num); + + isert_cmd->tx_desc.isert_cmd = isert_cmd; + + wr->iser_ib_op = ISER_IB_RDMA_READ; + wr->isert_cmd = isert_cmd; + rdma_write_max = isert_conn->max_sge * PAGE_SIZE; + offset = cmd->write_data_done; + + for (i = 0; i < wr->send_wr_num; i++) { + send_wr = &isert_cmd->rdma_wr.send_wr[i]; + data_len = min(data_left, rdma_write_max); + + send_wr->opcode = IB_WR_RDMA_READ; + send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; + send_wr->wr.rdma.rkey = isert_cmd->write_stag; + + ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, + send_wr, data_len, offset); + ib_sge += ib_sge_cnt; + + if (i + 1 == wr->send_wr_num) + send_wr->send_flags = IB_SEND_SIGNALED; + else + send_wr->next = &wr->send_wr[i + 1]; + + offset += data_len; + va_offset += data_len; + data_left -= data_len; + } + + atomic_inc(&isert_conn->post_send_buf_count); + + rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); + if (rc) { + 
pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); + atomic_dec(&isert_conn->post_send_buf_count); + } + pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n"); + return 0; + +unmap_sg: + ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE); + return ret; +} + +static int +isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) +{ + int ret; + + switch (state) { + case ISTATE_SEND_NOPIN_WANT_RESPONSE: + ret = isert_put_nopin(cmd, conn, false); + break; + default: + pr_err("Unknown immediate state: 0x%02x\n", state); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) +{ + int ret; + + switch (state) { + case ISTATE_SEND_LOGOUTRSP: + ret = isert_put_logout_rsp(cmd, conn); + if (!ret) { + pr_debug("Returning iSER Logout -EAGAIN\n"); + ret = -EAGAIN; + } + break; + case ISTATE_SEND_NOPIN: + ret = isert_put_nopin(cmd, conn, true); + break; + case ISTATE_SEND_TASKMGTRSP: + ret = isert_put_tm_rsp(cmd, conn); + break; + case ISTATE_SEND_REJECT: + ret = isert_put_reject(cmd, conn); + break; + case ISTATE_SEND_STATUS: + /* + * Special case for sending non GOOD SCSI status from TX thread + * context during pre se_cmd excecution failure. + */ + ret = isert_put_response(conn, cmd); + break; + default: + pr_err("Unknown response state: 0x%02x\n", state); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +isert_setup_np(struct iscsi_np *np, + struct __kernel_sockaddr_storage *ksockaddr) +{ + struct isert_np *isert_np; + struct rdma_cm_id *isert_lid; + struct sockaddr *sa; + int ret; + + isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); + if (!isert_np) { + pr_err("Unable to allocate struct isert_np\n"); + return -ENOMEM; + } + init_waitqueue_head(&isert_np->np_accept_wq); + mutex_init(&isert_np->np_accept_mutex); + INIT_LIST_HEAD(&isert_np->np_accept_list); + init_completion(&isert_np->np_login_comp); + + sa = (struct sockaddr *)ksockaddr; + pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa); + /* + * Setup the np->np_sockaddr from the passed sockaddr setup + * in iscsi_target_configfs.c code.. 
+
+static int
+isert_setup_np(struct iscsi_np *np,
+	       struct __kernel_sockaddr_storage *ksockaddr)
+{
+	struct isert_np *isert_np;
+	struct rdma_cm_id *isert_lid;
+	struct sockaddr *sa;
+	int ret;
+
+	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+	if (!isert_np) {
+		pr_err("Unable to allocate struct isert_np\n");
+		return -ENOMEM;
+	}
+	init_waitqueue_head(&isert_np->np_accept_wq);
+	mutex_init(&isert_np->np_accept_mutex);
+	INIT_LIST_HEAD(&isert_np->np_accept_list);
+	init_completion(&isert_np->np_login_comp);
+
+	sa = (struct sockaddr *)ksockaddr;
+	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
+	/*
+	 * Set up np->np_sockaddr from the sockaddr passed down from
+	 * the iscsi_target_configfs.c code.
+	 */
+	memcpy(&np->np_sockaddr, ksockaddr,
+	       sizeof(struct __kernel_sockaddr_storage));
+
+	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
+				   IB_QPT_RC);
+	if (IS_ERR(isert_lid)) {
+		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
+		       PTR_ERR(isert_lid));
+		ret = PTR_ERR(isert_lid);
+		goto out;
+	}
+
+	ret = rdma_bind_addr(isert_lid, sa);
+	if (ret) {
+		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
+		goto out_lid;
+	}
+
+	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
+	if (ret) {
+		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
+		goto out_lid;
+	}
+
+	isert_np->np_cm_id = isert_lid;
+	np->np_context = isert_np;
+	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
+
+	return 0;
+
+out_lid:
+	rdma_destroy_id(isert_lid);
+out:
+	kfree(isert_np);
+	return ret;
+}
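+
+/*
+ * The listener setup above follows the usual RDMA CM server sequence,
+ * mirroring socket()/bind()/listen(): rdma_create_id(), rdma_bind_addr()
+ * and rdma_listen().  Incoming connect requests are then delivered as
+ * events to isert_cma_handler() rather than through accept().
+ */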
+
+static int
+isert_check_accept_queue(struct isert_np *isert_np)
+{
+	int empty;
+
+	mutex_lock(&isert_np->np_accept_mutex);
+	empty = list_empty(&isert_np->np_accept_list);
+	mutex_unlock(&isert_np->np_accept_mutex);
+
+	return empty;
+}
+
+static int
+isert_rdma_accept(struct isert_conn *isert_conn)
+{
+	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+	struct rdma_conn_param cp;
+	int ret;
+
+	memset(&cp, 0, sizeof(struct rdma_conn_param));
+	cp.responder_resources = isert_conn->responder_resources;
+	cp.initiator_depth = isert_conn->initiator_depth;
+	cp.retry_count = 7;
+	cp.rnr_retry_count = 7;
+
+	pr_debug("Before rdma_accept\n");
+
+	ret = rdma_accept(cm_id, &cp);
+	if (ret) {
+		pr_err("rdma_accept() failed with: %d\n", ret);
+		return ret;
+	}
+
+	pr_debug("After rdma_accept\n");
+
+	return 0;
+}
+
+static int
+isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	int ret;
+
+	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+
+	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+	if (ret)
+		return ret;
+
+	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+	return 0;
+}
+
+static void
+isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+		    struct isert_conn *isert_conn)
+{
+	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+	struct rdma_route *cm_route = &cm_id->route;
+	struct sockaddr_in *sock_in;
+	struct sockaddr_in6 *sock_in6;
+
+	conn->login_family = np->np_sockaddr.ss_family;
+
+	if (np->np_sockaddr.ss_family == AF_INET6) {
+		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
+		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+			 &sock_in6->sin6_addr);
+		conn->login_port = ntohs(sock_in6->sin6_port);
+
+		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
+		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
+			 &sock_in6->sin6_addr);
+		conn->local_port = ntohs(sock_in6->sin6_port);
+	} else {
+		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
+		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
+			 &sock_in->sin_addr.s_addr);
+		conn->login_port = ntohs(sock_in->sin_port);
+
+		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
+		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
+			 &sock_in->sin_addr.s_addr);
+		conn->local_port = ntohs(sock_in->sin_port);
+	}
+}
+
+static int
+isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+	struct isert_np *isert_np = (struct isert_np *)np->np_context;
+	struct isert_conn *isert_conn;
+	int max_accept = 0, ret;
+
+accept_wait:
+	ret = wait_event_interruptible(isert_np->np_accept_wq,
+				       !isert_check_accept_queue(isert_np) ||
+				       np->np_thread_state == ISCSI_NP_THREAD_RESET);
+	if (max_accept > 5)
+		return -ENODEV;
+
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+		spin_unlock_bh(&np->np_thread_lock);
+		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+		return -ENODEV;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	mutex_lock(&isert_np->np_accept_mutex);
+	if (list_empty(&isert_np->np_accept_list)) {
+		mutex_unlock(&isert_np->np_accept_mutex);
+		max_accept++;
+		goto accept_wait;
+	}
+	isert_conn = list_first_entry(&isert_np->np_accept_list,
+				      struct isert_conn, conn_accept_node);
+	list_del_init(&isert_conn->conn_accept_node);
+	mutex_unlock(&isert_np->np_accept_mutex);
+
+	conn->context = isert_conn;
+	isert_conn->conn = conn;
+	max_accept = 0;
+
+	ret = isert_rdma_post_recvl(isert_conn);
+	if (ret)
+		return ret;
+
+	ret = isert_rdma_accept(isert_conn);
+	if (ret)
+		return ret;
+
+	isert_set_conn_info(np, conn, isert_conn);
+
+	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+	return 0;
+}
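+
+/*
+ * Note on the accept loop above: max_accept bounds how many times a
+ * wakeup may find np_accept_list already empty (a spurious wakeup, or
+ * the connection raced away) before isert_accept_np() gives up with
+ * -ENODEV.
+ */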
+
+static void
+isert_free_np(struct iscsi_np *np)
+{
+	struct isert_np *isert_np = (struct isert_np *)np->np_context;
+
+	rdma_destroy_id(isert_np->np_cm_id);
+
+	np->np_context = NULL;
+	kfree(isert_np);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+	struct isert_conn *isert_conn = conn->context;
+
+	pr_debug("isert_free_conn: Starting\n");
+	/*
+	 * Decrement post_send_buf_count for special case when called
+	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
+	 */
+	if (isert_conn->logout_posted)
+		atomic_dec(&isert_conn->post_send_buf_count);
+
+	if (isert_conn->conn_cm_id)
+		rdma_disconnect(isert_conn->conn_cm_id);
+	/*
+	 * Only wait for conn_wait_comp_err if the isert_conn made it
+	 * into full feature phase.
+	 */
+	if (isert_conn->state > ISER_CONN_INIT) {
+		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
+			 isert_conn->state);
+		wait_event(isert_conn->conn_wait_comp_err,
+			   isert_conn->state == ISER_CONN_TERMINATING);
+		pr_debug("isert_free_conn: After wait_event comp_err\n");
+	}
+
+	pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
+	wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
+	pr_debug("isert_free_conn: After wait_event conn_wait\n");
+
+	isert_put_conn(isert_conn);
+}
+
+static struct iscsit_transport iser_target_transport = {
+	.name			= "IB/iSER",
+	.transport_type		= ISCSI_INFINIBAND,
+	.owner			= THIS_MODULE,
+	.iscsit_setup_np	= isert_setup_np,
+	.iscsit_accept_np	= isert_accept_np,
+	.iscsit_free_np		= isert_free_np,
+	.iscsit_free_conn	= isert_free_conn,
+	.iscsit_alloc_cmd	= isert_alloc_cmd,
+	.iscsit_get_login_rx	= isert_get_login_rx,
+	.iscsit_put_login_tx	= isert_put_login_tx,
+	.iscsit_immediate_queue	= isert_immediate_queue,
+	.iscsit_response_queue	= isert_response_queue,
+	.iscsit_get_dataout	= isert_get_dataout,
+	.iscsit_queue_data_in	= isert_put_datain,
+	.iscsit_queue_status	= isert_put_response,
+};
+
+static int __init isert_init(void)
+{
+	int ret;
+
+	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
+	if (!isert_rx_wq) {
+		pr_err("Unable to allocate isert_rx_wq\n");
+		return -ENOMEM;
+	}
+
+	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+	if (!isert_comp_wq) {
+		pr_err("Unable to allocate isert_comp_wq\n");
+		ret = -ENOMEM;
+		goto destroy_rx_wq;
+	}
+
+	isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
+			sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
+			0, NULL);
+	if (!isert_cmd_cache) {
+		pr_err("Unable to create isert_cmd_cache\n");
+		ret = -ENOMEM;
+		goto destroy_comp_wq;
+	}
+
+	iscsit_register_transport(&iser_target_transport);
+	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+	return 0;
+
+destroy_comp_wq:
+	destroy_workqueue(isert_comp_wq);
+destroy_rx_wq:
+	destroy_workqueue(isert_rx_wq);
+	return ret;
+}
+
+static void __exit isert_exit(void)
+{
+	iscsit_unregister_transport(&iser_target_transport);
+	kmem_cache_destroy(isert_cmd_cache);
+	destroy_workqueue(isert_comp_wq);
+	destroy_workqueue(isert_rx_wq);
+	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
+}
+
+MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
+MODULE_VERSION("0.1");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(isert_init);
+module_exit(isert_exit);
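
To make the work-request splitting concrete, here is a small stand-alone sketch that mirrors the DIV_ROUND_UP()/min() logic used by isert_put_datain() and isert_get_dataout() above. The 4K page size, max_sge = 32 and the 1M transfer are assumed example values, not anything taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed example values: 4K pages, max_sge = 32, 1M Data-IN */
	unsigned int page_size = 4096, max_sge = 32;
	unsigned int data_length = 1024 * 1024;
	unsigned int sg_nents = DIV_ROUND_UP(data_length, page_size);

	/* Mirrors the driver: one WR per max_sge worth of SGEs */
	unsigned int send_wr_num = DIV_ROUND_UP(sg_nents, max_sge);
	unsigned int rdma_write_max = max_sge * page_size;
	unsigned int data_left = data_length, offset = 0, i;

	for (i = 0; i < send_wr_num; i++) {
		unsigned int data_len = data_left < rdma_write_max ?
					data_left : rdma_write_max;

		printf("WR[%u]: remote_addr offset %u, length %u\n",
		       i, offset, data_len);
		offset += data_len;
		data_left -= data_len;
	}
	return 0;
}

With these inputs the sketch prints eight 128K work requests, matching the DIV_ROUND_UP(256, 32) = 8 chains built by the data-path code.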
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
new file mode 100644
index 000000000000..b104f4c2cd38
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -0,0 +1,138 @@
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+
+#define ISERT_RDMA_LISTEN_BACKLOG	10
+
+enum isert_desc_type {
+	ISCSI_TX_CONTROL,
+	ISCSI_TX_DATAIN
+};
+
+enum iser_ib_op_code {
+	ISER_IB_RECV,
+	ISER_IB_SEND,
+	ISER_IB_RDMA_WRITE,
+	ISER_IB_RDMA_READ,
+};
+
+enum iser_conn_state {
+	ISER_CONN_INIT,
+	ISER_CONN_UP,
+	ISER_CONN_TERMINATING,
+	ISER_CONN_DOWN,
+};
+
+struct iser_rx_desc {
+	struct iser_hdr		iser_header;
+	struct iscsi_hdr	iscsi_header;
+	char			data[ISER_RECV_DATA_SEG_LEN];
+	u64			dma_addr;
+	struct ib_sge		rx_sg;
+	char			pad[ISER_RX_PAD_SIZE];
+} __packed;
+
+struct iser_tx_desc {
+	struct iser_hdr		iser_header;
+	struct iscsi_hdr	iscsi_header;
+	enum isert_desc_type	type;
+	u64			dma_addr;
+	struct ib_sge		tx_sg[2];
+	int			num_sge;
+	struct isert_cmd	*isert_cmd;
+	struct ib_send_wr	send_wr;
+} __packed;
+
+struct isert_rdma_wr {
+	struct list_head	wr_list;
+	struct isert_cmd	*isert_cmd;
+	enum iser_ib_op_code	iser_ib_op;
+	struct ib_sge		*ib_sge;
+	int			num_sge;
+	struct scatterlist	*sge;
+	int			send_wr_num;
+	struct ib_send_wr	*send_wr;
+};
+
+struct isert_cmd {
+	uint32_t		read_stag;
+	uint32_t		write_stag;
+	uint64_t		read_va;
+	uint64_t		write_va;
+	u64			sense_buf_dma;
+	u32			sense_buf_len;
+	u32			read_va_off;
+	u32			write_va_off;
+	u32			rdma_wr_num;
+	struct isert_conn	*conn;
+	struct iscsi_cmd	iscsi_cmd;
+	struct ib_sge		*ib_sge;
+	struct iser_tx_desc	tx_desc;
+	struct isert_rdma_wr	rdma_wr;
+	struct work_struct	comp_work;
+};
+
+struct isert_device;
+
+struct isert_conn {
+	enum iser_conn_state	state;
+	bool			logout_posted;
+	int			post_recv_buf_count;
+	atomic_t		post_send_buf_count;
+	u32			responder_resources;
+	u32			initiator_depth;
+	u32			max_sge;
+	char			*login_buf;
+	char			*login_req_buf;
+	char			*login_rsp_buf;
+	u64			login_req_dma;
+	u64			login_rsp_dma;
+	unsigned int		conn_rx_desc_head;
+	struct iser_rx_desc	*conn_rx_descs;
+	struct ib_recv_wr	conn_rx_wr[ISERT_MIN_POSTED_RX];
+	struct iscsi_conn	*conn;
+	struct list_head	conn_accept_node;
+	struct completion	conn_login_comp;
+	struct iser_tx_desc	conn_login_tx_desc;
+	struct rdma_cm_id	*conn_cm_id;
+	struct ib_pd		*conn_pd;
+	struct ib_mr		*conn_mr;
+	struct ib_qp		*conn_qp;
+	struct isert_device	*conn_device;
+	struct work_struct	conn_logout_work;
+	wait_queue_head_t	conn_wait;
+	wait_queue_head_t	conn_wait_comp_err;
+	struct kref		conn_kref;
+};
+
+#define ISERT_MAX_CQ 64
+
+struct isert_cq_desc {
+	struct isert_device	*device;
+	int			cq_index;
+	struct work_struct	cq_rx_work;
+	struct work_struct	cq_tx_work;
+};
+
+struct isert_device {
+	int			cqs_used;
+	int			refcount;
+	int			cq_active_qps[ISERT_MAX_CQ];
+	struct ib_device	*ib_device;
+	struct ib_pd		*dev_pd;
+	struct ib_mr		*dev_mr;
+	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
+	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
+	struct isert_cq_desc	*cq_desc;
+	struct list_head	dev_node;
+};
+
+struct isert_np {
+	wait_queue_head_t	np_accept_wq;
+	struct rdma_cm_id	*np_cm_id;
+	struct mutex		np_accept_mutex;
+	struct list_head	np_accept_list;
+	struct completion	np_login_comp;
+};
diff --git a/drivers/infiniband/ulp/isert/isert_proto.h b/drivers/infiniband/ulp/isert/isert_proto.h
new file mode 100644
index 000000000000..4dccd313b777
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/isert_proto.h
@@ -0,0 +1,47 @@
+/* From iscsi_iser.h */
+
+struct iser_hdr {
+	u8	flags;
+	u8	rsvd[3];
+	__be32	write_stag; /* write rkey */
+	__be64	write_va;
+	__be32	read_stag;  /* read rkey */
+	__be64	read_va;
+} __packed;
+
+/* Constant PDU length calculations */
+#define ISER_HEADERS_LEN	(sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
+
+#define ISER_RECV_DATA_SEG_LEN	8192
+#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
+#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+
+/* QP settings */
+/* Maximal bounds on received asynchronous PDUs */
+#define ISERT_MAX_TX_MISC_PDUS	4 /* NOOP_IN(2), ASYNC_EVENT(2) */
+
+#define ISERT_MAX_RX_MISC_PDUS	6 /* NOOP_OUT(2), TEXT(1), *
+				   * SCSI_TMFUNC(2), LOGOUT(1) */
+
+#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */
+
+#define ISERT_QP_MAX_RECV_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISERT_MIN_POSTED_RX	(ISCSI_DEF_XMIT_CMDS_MAX >> 2)
+
+#define ISERT_INFLIGHT_DATAOUTS	8
+
+#define ISERT_QP_MAX_REQ_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX *	\
+				(1 + ISERT_INFLIGHT_DATAOUTS) +	\
+				ISERT_MAX_TX_MISC_PDUS +	\
+				ISERT_MAX_RX_MISC_PDUS)
+
+#define ISER_RX_PAD_SIZE	(ISER_RECV_DATA_SEG_LEN + 4096 - \
+				(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
+
+#define ISER_VER	0x10
+#define ISER_WSV	0x08
+#define ISER_RSV	0x04
+#define ISCSI_CTRL	0x10
+#define ISER_HELLO	0x20
+#define ISER_HELLORPLY	0x30
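
The queue-depth and padding constants above interact, and the following stand-alone sketch recomputes them so the arithmetic can be checked. The 28/48/16-byte struct sizes are assumptions read off iser_hdr, the iSCSI BHS and struct ib_sge; they are not stated in the patch itself:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Assumed sizes: iser_hdr = 28, iscsi_hdr BHS = 48, ib_sge = 16 */
	int iser_headers_len = 28 + 48;
	int recv_data_seg_len = 8192;	/* ISER_RECV_DATA_SEG_LEN */
	int rx_payload_size = iser_headers_len + recv_data_seg_len;
	int rx_pad_size = recv_data_seg_len + 4096 -
			  (rx_payload_size + 8 /* u64 dma_addr */ + 16);

	/* By construction, iser_rx_desc sizes out to data segment + 4K */
	int rx_desc_size = rx_payload_size + 8 + 16 + rx_pad_size;
	assert(rx_desc_size == recv_data_seg_len + 4096);
	printf("iser_rx_desc: %d bytes (%d x 4K pages)\n",
	       rx_desc_size, rx_desc_size / 4096);

	/* ISERT_QP_MAX_REQ_DTOS with the header's default values */
	int xmit_cmds_max = 128, inflight_dataouts = 8;
	int qp_max_req_dtos = xmit_cmds_max * (1 + inflight_dataouts) + 4 + 6;
	printf("ISERT_QP_MAX_REQ_DTOS = %d\n", qp_max_req_dtos);
	return 0;
}

Under those assumptions each RX descriptor comes out to 12288 bytes, exactly three 4K pages, and the send queue is sized for 1162 outstanding work requests.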
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index c09d41b1a2ff..3f3f0416fbdd 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1374,7 +1374,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	default:
-		WARN_ON("ERROR: unexpected command state");
+		WARN(1, "Unexpected command state (%d)", state);
 		break;
 	}
 
@@ -2227,6 +2227,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch)
 }
 
 /**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch->spinlock, flags);
+	if (ch->in_shutdown) {
+		spin_unlock_irqrestore(&ch->spinlock, flags);
+		return true;
+	}
+
+	ch->in_shutdown = true;
+	target_sess_cmd_list_set_waiting(se_sess);
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+
+	return true;
+}
+
+/**
  * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
  * @cm_id: Pointer to the CM ID of the channel to be drained.
  *
@@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id)
 	spin_unlock_irq(&sdev->spinlock);
 
 	if (do_reset) {
+		if (ch->sess)
+			srpt_shutdown_session(ch->sess);
+
 		ret = srpt_ch_qp_err(ch);
 		if (ret < 0)
 			printk(KERN_ERR "Setting queue pair in error state"
@@ -2328,7 +2352,7 @@ static void srpt_release_channel_work(struct work_struct *w)
 	se_sess = ch->sess;
 	BUG_ON(!se_sess);
 
-	target_wait_for_sess_cmds(se_sess, 0);
+	target_wait_for_sess_cmds(se_sess);
 
 	transport_deregister_session_configfs(se_sess);
 	transport_deregister_session(se_sess);
@@ -3467,14 +3491,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
 }
 
 /**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
-	return true;
-}
-
-/**
  * srpt_close_session() - Forcibly close a session.
  *
  * Callback function invoked by the TCM core to clean up sessions associated
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 4caf55cda7b1..3dae156905de 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -325,6 +325,7 @@ struct srpt_rdma_ch {
 	u8			sess_name[36];
 	struct work_struct	release_work;
 	struct completion	*release_done;
+	bool			in_shutdown;
 };
 
 /**
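
The ib_srpt change above turns srpt_shutdown_session() from a stub into a one-shot guarded by the new ch->in_shutdown flag, so that both the TCM callback and srpt_drain_channel() can invoke it safely. Here is a minimal user-space sketch of the same pattern; the struct session and its pthread mutex are hypothetical stand-ins for srpt_rdma_ch and its spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for srpt_rdma_ch and its spinlock */
struct session {
	pthread_mutex_t lock;
	bool in_shutdown;
};

static bool session_shutdown(struct session *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->in_shutdown) {
		/* Shutdown already started: nothing left to do */
		pthread_mutex_unlock(&s->lock);
		return true;
	}
	s->in_shutdown = true;
	/* ...start waiting for outstanding commands here, exactly once... */
	pthread_mutex_unlock(&s->lock);
	return true;
}

int main(void)
{
	struct session s = { PTHREAD_MUTEX_INITIALIZER, false };

	session_shutdown(&s);	/* first caller flips the flag */
	session_shutdown(&s);	/* later callers are harmless no-ops */
	printf("in_shutdown = %d\n", s.in_shutdown);
	return 0;
}

The flag-under-lock test makes the operation idempotent, which is the property the patch relies on when srpt_drain_channel() may race with the core's own shutdown callback.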