author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-07 01:55:33 +0300
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-07 01:55:33 +0300
commit    0c7d3757116c59b3eedd9aa6dfd7ae0a1341f5c2 (patch)
tree      61dbcee09d925a839b34df3b8149e26972dc5402 /drivers
parent    2442d3109943bafbdfc4f0495e3d10eeedc8390c (diff)
parent    b45bfcc1ae084aa98c0350b8c33c8b57540b0acc (diff)
download  linux-0c7d3757116c59b3eedd9aa6dfd7ae0a1341f5c2.tar.xz
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Remove obsolete prototypes
  IB/ehca: Remove use of do_mmap()
  RDMA/addr: Handle ethernet neighbour updates during route resolution
  IB: Make sure struct ib_user_mad.data is aligned
  IB/srp: Don't wait for response when QP is in error state.
  IB: Return qp pointer as part of ib_wc
  IB: Include <linux/kref.h> explicitly in <rdma/ib_verbs.h>
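
The "IB: Return qp pointer as part of ib_wc" entry changes struct ib_wc to carry a struct ib_qp pointer instead of a bare QP number, so completion consumers dereference the pointer to recover the number. A minimal sketch of what a consumer looks like after this change (example_report_completion is an illustrative name, not a kernel symbol; assumes <rdma/ib_verbs.h> as of this merge):

	#include <linux/kernel.h>
	#include <rdma/ib_verbs.h>

	/* A completion now identifies its QP by pointer; the old wc->qp_num
	 * field is gone, so the number is read as wc->qp->qp_num, exactly as
	 * the uverbs poll path in the diff below does. */
	static void example_report_completion(const struct ib_wc *wc)
	{
		u32 qpn = wc->qp->qp_num;	/* was: wc->qp_num */

		printk(KERN_DEBUG "completion on QP 0x%x, status %d\n",
		       qpn, wc->status);
	}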
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/addr.c             |   3
-rw-r--r--  drivers/infiniband/core/mad.c              |  11
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c       |   2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c     |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h  |  29
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c       |  65
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h   |   8
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c     |   6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c       |  78
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c     |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c   | 395
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c     |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c     |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c    |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c     |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c     |   8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c    |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c     |   2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c        |   7
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h        |   1
20 files changed, 240 insertions(+), 403 deletions(-)
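
The ehca do_mmap() removal (the bulk of the ehca_uverbs.c changes below) sets up user mappings directly in the driver's mmap handler and tracks live mappings with per-resource counters through vm_operations open/close, so the destroy paths can refuse teardown while anything is still mapped. A rough sketch of that pattern, using illustrative names rather than the exact ehca symbols:

	#include <linux/mm.h>

	/* Each mapped resource owns a u32 use counter; VMA open/close keep it
	 * in sync so destroy can reject teardown while the count is nonzero
	 * (compare the mm_count_* fields and ehca_mm_open/close in the diff). */
	static void example_vma_open(struct vm_area_struct *vma)
	{
		u32 *count = vma->vm_private_data;
		(*count)++;
	}

	static void example_vma_close(struct vm_area_struct *vma)
	{
		u32 *count = vma->vm_private_data;
		(*count)--;
	}

	static struct vm_operations_struct example_vm_ops = {
		.open  = example_vma_open,
		.close = example_vma_close,
	};

	/* Map one firmware page into the caller's VMA and register the counter. */
	static int example_mmap_fw(struct vm_area_struct *vma, u64 fw_handle,
				   u32 *count)
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (remap_pfn_range(vma, vma->vm_start, fw_handle >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -ENOMEM;
		vma->vm_private_data = count;
		(*count)++;		/* the initial mmap also counts as a reference */
		vma->vm_ops = &example_vm_ops;
		return 0;
	}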
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index af939796750d..d2bb5a9a303f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -360,8 +360,7 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
- if (neigh->dev->type == ARPHRD_INFINIBAND &&
- (neigh->nud_state & NUD_VALID)) {
+ if (neigh->nud_state & NUD_VALID) {
set_timeout(jiffies);
}
}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed141ebd1c8..13efd4170349 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -642,7 +642,8 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
-static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
+static void build_smp_wc(struct ib_qp *qp,
+ u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
struct ib_wc *wc)
{
memset(wc, 0, sizeof *wc);
@@ -652,7 +653,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
wc->pkey_index = pkey_index;
wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
wc->src_qp = IB_QP0;
- wc->qp_num = IB_QP0;
+ wc->qp = qp;
wc->slid = slid;
wc->sl = 0;
wc->dlid_path_bits = 0;
@@ -713,7 +714,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
goto out;
}
- build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+ build_smp_wc(mad_agent_priv->agent.qp,
+ send_wr->wr_id, be16_to_cpu(smp->dr_slid),
send_wr->wr.ud.pkey_index,
send_wr->wr.ud.port_num, &mad_wc);
@@ -2355,7 +2357,8 @@ static void local_completions(struct work_struct *work)
* Defined behavior is to complete response
* before request
*/
- build_smp_wc((unsigned long) local->mad_send_wr,
+ build_smp_wc(recv_mad_agent->agent.qp,
+ (unsigned long) local->mad_send_wr,
be16_to_cpu(IB_LID_PERMISSIVE),
0, recv_mad_agent->agent.port_num, &wc);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 743247ec065e..df1efbc10882 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -933,7 +933,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
resp->wc[i].vendor_err = wc[i].vendor_err;
resp->wc[i].byte_len = wc[i].byte_len;
resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
- resp->wc[i].qp_num = wc[i].qp_num;
+ resp->wc[i].qp_num = wc[i].qp->qp_num;
resp->wc[i].src_qp = wc[i].src_qp;
resp->wc[i].wc_flags = wc[i].wc_flags;
resp->wc[i].pkey_index = wc[i].pkey_index;
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 05c9154d46f4..5175c99ee586 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -153,7 +153,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
entry->wr_id = ce->hdr.context;
- entry->qp_num = ce->handle;
+ entry->qp = &qp->ibqp;
entry->wc_flags = 0;
entry->slid = 0;
entry->sl = 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1c722032319c..cf95ee474b0f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -119,13 +119,14 @@ struct ehca_qp {
struct ipz_qp_handle ipz_qp_handle;
struct ehca_pfqp pf;
struct ib_qp_init_attr init_attr;
- u64 uspace_squeue;
- u64 uspace_rqueue;
- u64 uspace_fwh;
struct ehca_cq *send_cq;
struct ehca_cq *recv_cq;
unsigned int sqerr_purgeflag;
struct hlist_node list_entries;
+ /* mmap counter for resources mapped into user space */
+ u32 mm_count_squeue;
+ u32 mm_count_rqueue;
+ u32 mm_count_galpa;
};
/* must be power of 2 */
@@ -142,13 +143,14 @@ struct ehca_cq {
struct ipz_cq_handle ipz_cq_handle;
struct ehca_pfcq pf;
spinlock_t cb_lock;
- u64 uspace_queue;
- u64 uspace_fwh;
struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
struct list_head entry;
u32 nr_callbacks;
spinlock_t task_lock;
u32 ownpid;
+ /* mmap counter for resources mapped into user space */
+ u32 mm_count_queue;
+ u32 mm_count_galpa;
};
enum ehca_mr_flag {
@@ -248,20 +250,6 @@ struct ehca_ucontext {
struct ib_ucontext ib_ucontext;
};
-struct ehca_module *ehca_module_new(void);
-
-int ehca_module_delete(struct ehca_module *me);
-
-int ehca_eq_ctor(struct ehca_eq *eq);
-
-int ehca_eq_dtor(struct ehca_eq *eq);
-
-struct ehca_shca *ehca_shca_new(void);
-
-int ehca_shca_delete(struct ehca_shca *me);
-
-struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
-
int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
@@ -283,7 +271,6 @@ extern int ehca_port_act_time;
extern int ehca_use_hp_mr;
struct ipzu_queue_resp {
- u64 queue; /* points to first queue entry */
u32 qe_size; /* queue entry size */
u32 act_nr_of_sg;
u32 queue_length; /* queue length allocated in bytes */
@@ -296,7 +283,6 @@ struct ehca_create_cq_resp {
u32 cq_number;
u32 token;
struct ipzu_queue_resp ipz_queue;
- struct h_galpas galpas;
};
struct ehca_create_qp_resp {
@@ -309,7 +295,6 @@ struct ehca_create_qp_resp {
u32 dummy; /* padding for 8 byte alignment */
struct ipzu_queue_resp ipz_squeue;
struct ipzu_queue_resp ipz_rqueue;
- struct h_galpas galpas;
};
struct ehca_alloc_cq_parms {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6074c897f51c..9291a86ca053 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
if (context) {
struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
struct ehca_create_cq_resp resp;
- struct vm_area_struct *vma;
memset(&resp, 0, sizeof(resp));
resp.cq_number = my_cq->cq_number;
resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
resp.ipz_queue.queue_length = ipz_queue->queue_length;
resp.ipz_queue.pagesize = ipz_queue->pagesize;
resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
- ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
- ipz_queue->queue_length,
- (void**)&resp.ipz_queue.queue,
- &vma);
- if (ret) {
- ehca_err(device, "Could not mmap queue pages");
- cq = ERR_PTR(ret);
- goto create_cq_exit4;
- }
- my_cq->uspace_queue = resp.ipz_queue.queue;
- resp.galpas = my_cq->galpas;
- ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
- (void**)&resp.galpas.kernel.fw_handle,
- &vma);
- if (ret) {
- ehca_err(device, "Could not mmap fw_handle");
- cq = ERR_PTR(ret);
- goto create_cq_exit5;
- }
- my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
ehca_err(device, "Copy to udata failed.");
- goto create_cq_exit6;
+ goto create_cq_exit4;
}
}
return cq;
-create_cq_exit6:
- ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
-
-create_cq_exit5:
- ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
-
create_cq_exit4:
ipz_queue_dtor(&my_cq->ipz_queue);
@@ -333,7 +306,6 @@ create_cq_exit1:
int ehca_destroy_cq(struct ib_cq *cq)
{
u64 h_ret;
- int ret;
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
int cq_num = my_cq->cq_number;
struct ib_device *device = cq->device;
@@ -343,6 +315,20 @@ int ehca_destroy_cq(struct ib_cq *cq)
u32 cur_pid = current->tgid;
unsigned long flags;
+ if (cq->uobject) {
+ if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
+ ehca_err(device, "Resources still referenced in "
+ "user space cq_num=%x", my_cq->cq_number);
+ return -EINVAL;
+ }
+ if (my_cq->ownpid != cur_pid) {
+ ehca_err(device, "Invalid caller pid=%x ownpid=%x "
+ "cq_num=%x",
+ cur_pid, my_cq->ownpid, my_cq->cq_number);
+ return -EINVAL;
+ }
+ }
+
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
while (my_cq->nr_callbacks) {
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -353,25 +339,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
idr_remove(&ehca_cq_idr, my_cq->token);
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
- ehca_err(device, "Invalid caller pid=%x ownpid=%x",
- cur_pid, my_cq->ownpid);
- return -EINVAL;
- }
-
- /* un-mmap if vma alloc */
- if (my_cq->uspace_queue ) {
- ret = ehca_munmap(my_cq->uspace_queue,
- my_cq->ipz_queue.queue_length);
- if (ret)
- ehca_err(device, "Could not munmap queue ehca_cq=%p "
- "cq_num=%x", my_cq, cq_num);
- ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
- if (ret)
- ehca_err(device, "Could not munmap fwh ehca_cq=%p "
- "cq_num=%x", my_cq, cq_num);
- }
-
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
if (h_ret == H_R_STATE) {
/* cq in err: read err data and destroy it forcibly */
@@ -400,7 +367,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
u32 cur_pid = current->tgid;
- if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+ if (cq->uobject && my_cq->ownpid != cur_pid) {
ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_cq->ownpid);
return -EINVAL;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index cd7789f0d08e..95fd59fb4528 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -171,14 +171,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void ehca_poll_eqs(unsigned long data);
-int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
- struct vm_area_struct **vma);
-
-int ehca_mmap_register(u64 physical,void **mapped,
- struct vm_area_struct **vma);
-
-int ehca_munmap(unsigned long addr, size_t len);
-
#ifdef CONFIG_PPC_64K_PAGES
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 6574fbbaead5..1155bcf48212 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0019");
+MODULE_VERSION("SVNEHCA_0020");
int ehca_open_aqp1 = 0;
int ehca_debug_level = 0;
@@ -288,7 +288,7 @@ int ehca_init_device(struct ehca_shca *shca)
strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
shca->ib_device.owner = THIS_MODULE;
- shca->ib_device.uverbs_abi_ver = 5;
+ shca->ib_device.uverbs_abi_ver = 6;
shca->ib_device.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
int ret;
printk(KERN_INFO "eHCA Infiniband Device Driver "
- "(Rel.: SVNEHCA_0019)\n");
+ "(Rel.: SVNEHCA_0020)\n");
idr_init(&ehca_qp_idr);
idr_init(&ehca_cq_idr);
spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 34b85556d01e..95efef921f1d 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -637,7 +637,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
struct ehca_create_qp_resp resp;
- struct vm_area_struct * vma;
memset(&resp, 0, sizeof(resp));
resp.qp_num = my_qp->real_qp_num;
@@ -651,59 +650,21 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
- ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
- ipz_rqueue->queue_length,
- (void**)&resp.ipz_rqueue.queue,
- &vma);
- if (ret) {
- ehca_err(pd->device, "Could not mmap rqueue pages");
- goto create_qp_exit3;
- }
- my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
/* squeue properties */
resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
- ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
- ipz_squeue->queue_length,
- (void**)&resp.ipz_squeue.queue,
- &vma);
- if (ret) {
- ehca_err(pd->device, "Could not mmap squeue pages");
- goto create_qp_exit4;
- }
- my_qp->uspace_squeue = resp.ipz_squeue.queue;
- /* fw_handle */
- resp.galpas = my_qp->galpas;
- ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
- (void**)&resp.galpas.kernel.fw_handle,
- &vma);
- if (ret) {
- ehca_err(pd->device, "Could not mmap fw_handle");
- goto create_qp_exit5;
- }
- my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
-
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
ehca_err(pd->device, "Copy to udata failed");
ret = -EINVAL;
- goto create_qp_exit6;
+ goto create_qp_exit3;
}
}
return &my_qp->ib_qp;
-create_qp_exit6:
- ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
-
-create_qp_exit5:
- ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
-
-create_qp_exit4:
- ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
-
create_qp_exit3:
ipz_queue_dtor(&my_qp->ipz_rqueue);
ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -931,7 +892,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
my_qp->qp_type == IB_QPT_SMI) &&
statetrans == IB_QPST_SQE2RTS) {
/* mark next free wqe if kernel */
- if (my_qp->uspace_squeue == 0) {
+ if (!ibqp->uobject) {
struct ehca_wqe *wqe;
/* lock send queue */
spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -1417,11 +1378,18 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
enum ib_qp_type qp_type;
unsigned long flags;
- if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
- my_pd->ownpid != cur_pid) {
- ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
- cur_pid, my_pd->ownpid);
- return -EINVAL;
+ if (ibqp->uobject) {
+ if (my_qp->mm_count_galpa ||
+ my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
+ ehca_err(ibqp->device, "Resources still referenced in "
+ "user space qp_num=%x", ibqp->qp_num);
+ return -EINVAL;
+ }
+ if (my_pd->ownpid != cur_pid) {
+ ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
}
if (my_qp->send_cq) {
@@ -1439,24 +1407,6 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
idr_remove(&ehca_qp_idr, my_qp->token);
spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
- /* un-mmap if vma alloc */
- if (my_qp->uspace_rqueue) {
- ret = ehca_munmap(my_qp->uspace_rqueue,
- my_qp->ipz_rqueue.queue_length);
- if (ret)
- ehca_err(ibqp->device, "Could not munmap rqueue "
- "qp_num=%x", qp_num);
- ret = ehca_munmap(my_qp->uspace_squeue,
- my_qp->ipz_squeue.queue_length);
- if (ret)
- ehca_err(ibqp->device, "Could not munmap squeue "
- "qp_num=%x", qp_num);
- ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
- if (ret)
- ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
- qp_num);
- }
-
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index b46bda1bf85d..08d3f892d9f3 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -579,7 +579,7 @@ poll_cq_one_read_cqe:
} else
wc->status = IB_WC_SUCCESS;
- wc->qp_num = cqe->local_qp_number;
+ wc->qp = NULL;
wc->byte_len = cqe->nr_bytes_transferred;
wc->pkey_index = cqe->pkey_index;
wc->slid = cqe->rlid;
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e08764e4aef2..73db920b6945 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -68,105 +68,183 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
return 0;
}
-struct page *ehca_nopage(struct vm_area_struct *vma,
- unsigned long address, int *type)
+static void ehca_mm_open(struct vm_area_struct *vma)
{
- struct page *mypage = NULL;
- u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
- u32 idr_handle = fileoffset >> 32;
- u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
- u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
- u32 cur_pid = current->tgid;
- unsigned long flags;
- struct ehca_cq *cq;
- struct ehca_qp *qp;
- struct ehca_pd *pd;
- u64 offset;
- void *vaddr;
+ u32 *count = (u32*)vma->vm_private_data;
+ if (!count) {
+ ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
+ vma->vm_start, vma->vm_end);
+ return;
+ }
+ (*count)++;
+ if (!(*count))
+ ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
+ vma->vm_start, vma->vm_end);
+ ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
+ vma->vm_start, vma->vm_end, *count);
+}
- switch (q_type) {
- case 1: /* CQ */
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- cq = idr_find(&ehca_cq_idr, idr_handle);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+static void ehca_mm_close(struct vm_area_struct *vma)
+{
+ u32 *count = (u32*)vma->vm_private_data;
+ if (!count) {
+ ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
+ vma->vm_start, vma->vm_end);
+ return;
+ }
+ (*count)--;
+ ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
+ vma->vm_start, vma->vm_end, *count);
+}
- /* make sure this mmap really belongs to the authorized user */
- if (!cq) {
- ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
- return NOPAGE_SIGBUS;
+static struct vm_operations_struct vm_ops = {
+ .open = ehca_mm_open,
+ .close = ehca_mm_close,
+};
+
+static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
+ u32 *mm_count)
+{
+ int ret;
+ u64 vsize, physical;
+
+ vsize = vma->vm_end - vma->vm_start;
+ if (vsize != EHCA_PAGESIZE) {
+ ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
+ return -EINVAL;
+ }
+
+ physical = galpas->user.fw_handle;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
+ /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+ ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
+ vsize, vma->vm_page_prot);
+ if (unlikely(ret)) {
+ ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+ return -ENOMEM;
+ }
+
+ vma->vm_private_data = mm_count;
+ (*mm_count)++;
+ vma->vm_ops = &vm_ops;
+
+ return 0;
+}
+
+static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
+ u32 *mm_count)
+{
+ int ret;
+ u64 start, ofs;
+ struct page *page;
+
+ vma->vm_flags |= VM_RESERVED;
+ start = vma->vm_start;
+ for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
+ u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
+ page = virt_to_page(virt_addr);
+ ret = vm_insert_page(vma, start, page);
+ if (unlikely(ret)) {
+ ehca_gen_err("vm_insert_page() failed rc=%x", ret);
+ return ret;
}
+ start += PAGE_SIZE;
+ }
+ vma->vm_private_data = mm_count;
+ (*mm_count)++;
+ vma->vm_ops = &vm_ops;
- if (cq->ownpid != cur_pid) {
+ return 0;
+}
+
+static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
+ u32 rsrc_type)
+{
+ int ret;
+
+ switch (rsrc_type) {
+ case 1: /* galpa fw handle */
+ ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
+ ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
+ if (unlikely(ret)) {
ehca_err(cq->ib_cq.device,
- "Invalid caller pid=%x ownpid=%x",
- cur_pid, cq->ownpid);
- return NOPAGE_SIGBUS;
+ "ehca_mmap_fw() failed rc=%x cq_num=%x",
+ ret, cq->cq_number);
+ return ret;
}
+ break;
- if (rsrc_type == 2) {
- ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
- offset = address - vma->vm_start;
- vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
- ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
- offset, vaddr);
- mypage = virt_to_page(vaddr);
+ case 2: /* cq queue_addr */
+ ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
+ ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
+ if (unlikely(ret)) {
+ ehca_err(cq->ib_cq.device,
+ "ehca_mmap_queue() failed rc=%x cq_num=%x",
+ ret, cq->cq_number);
+ return ret;
}
break;
- case 2: /* QP */
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
- qp = idr_find(&ehca_qp_idr, idr_handle);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ default:
+ ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
+ rsrc_type, cq->cq_number);
+ return -EINVAL;
+ }
- /* make sure this mmap really belongs to the authorized user */
- if (!qp) {
- ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
- return NOPAGE_SIGBUS;
+ return 0;
+}
+
+static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
+ u32 rsrc_type)
+{
+ int ret;
+
+ switch (rsrc_type) {
+ case 1: /* galpa fw handle */
+ ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
+ ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
+ if (unlikely(ret)) {
+ ehca_err(qp->ib_qp.device,
+ "remap_pfn_range() failed ret=%x qp_num=%x",
+ ret, qp->ib_qp.qp_num);
+ return -ENOMEM;
}
+ break;
- pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
- if (pd->ownpid != cur_pid) {
+ case 2: /* qp rqueue_addr */
+ ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
+ qp->ib_qp.qp_num);
+ ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
+ if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
- "Invalid caller pid=%x ownpid=%x",
- cur_pid, pd->ownpid);
- return NOPAGE_SIGBUS;
+ "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
+ ret, qp->ib_qp.qp_num);
+ return ret;
}
+ break;
- if (rsrc_type == 2) { /* rqueue */
- ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
- offset = address - vma->vm_start;
- vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
- ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
- offset, vaddr);
- mypage = virt_to_page(vaddr);
- } else if (rsrc_type == 3) { /* squeue */
- ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
- offset = address - vma->vm_start;
- vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
- ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
- offset, vaddr);
- mypage = virt_to_page(vaddr);
+ case 3: /* qp squeue_addr */
+ ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
+ qp->ib_qp.qp_num);
+ ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
+ if (unlikely(ret)) {
+ ehca_err(qp->ib_qp.device,
+ "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
+ ret, qp->ib_qp.qp_num);
+ return ret;
}
break;
default:
- ehca_gen_err("bad queue type %x", q_type);
- return NOPAGE_SIGBUS;
- }
-
- if (!mypage) {
- ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
- return NOPAGE_SIGBUS;
+ ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
+ rsrc_type, qp->ib_qp.qp_num);
+ return -EINVAL;
}
- get_page(mypage);
- return mypage;
+ return 0;
}
-static struct vm_operations_struct ehcau_vm_ops = {
- .nopage = ehca_nopage,
-};
-
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
@@ -175,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
u32 cur_pid = current->tgid;
u32 ret;
- u64 vsize, physical;
unsigned long flags;
struct ehca_cq *cq;
struct ehca_qp *qp;
@@ -201,44 +278,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
return -EINVAL;
- switch (rsrc_type) {
- case 1: /* galpa fw handle */
- ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
- vma->vm_flags |= VM_RESERVED;
- vsize = vma->vm_end - vma->vm_start;
- if (vsize != EHCA_PAGESIZE) {
- ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
- vma->vm_end - vma->vm_start);
- return -EINVAL;
- }
-
- physical = cq->galpas.user.fw_handle;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_IO | VM_RESERVED;
-
- ehca_dbg(cq->ib_cq.device,
- "vsize=%lx physical=%lx", vsize, physical);
- ret = remap_pfn_range(vma, vma->vm_start,
- physical >> PAGE_SHIFT, vsize,
- vma->vm_page_prot);
- if (ret) {
- ehca_err(cq->ib_cq.device,
- "remap_pfn_range() failed ret=%x",
- ret);
- return -ENOMEM;
- }
- break;
-
- case 2: /* cq queue_addr */
- ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
- vma->vm_flags |= VM_RESERVED;
- vma->vm_ops = &ehcau_vm_ops;
- break;
-
- default:
- ehca_err(cq->ib_cq.device, "bad resource type %x",
- rsrc_type);
- return -EINVAL;
+ ret = ehca_mmap_cq(vma, cq, rsrc_type);
+ if (unlikely(ret)) {
+ ehca_err(cq->ib_cq.device,
+ "ehca_mmap_cq() failed rc=%x cq_num=%x",
+ ret, cq->cq_number);
+ return ret;
}
break;
@@ -262,50 +307,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
return -EINVAL;
- switch (rsrc_type) {
- case 1: /* galpa fw handle */
- ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
- vma->vm_flags |= VM_RESERVED;
- vsize = vma->vm_end - vma->vm_start;
- if (vsize != EHCA_PAGESIZE) {
- ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
- vma->vm_end - vma->vm_start);
- return -EINVAL;
- }
-
- physical = qp->galpas.user.fw_handle;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_IO | VM_RESERVED;
-
- ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
- vsize, physical);
- ret = remap_pfn_range(vma, vma->vm_start,
- physical >> PAGE_SHIFT, vsize,
- vma->vm_page_prot);
- if (ret) {
- ehca_err(qp->ib_qp.device,
- "remap_pfn_range() failed ret=%x",
- ret);
- return -ENOMEM;
- }
- break;
-
- case 2: /* qp rqueue_addr */
- ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
- vma->vm_flags |= VM_RESERVED;
- vma->vm_ops = &ehcau_vm_ops;
- break;
-
- case 3: /* qp squeue_addr */
- ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
- vma->vm_flags |= VM_RESERVED;
- vma->vm_ops = &ehcau_vm_ops;
- break;
-
- default:
- ehca_err(qp->ib_qp.device, "bad resource type %x",
- rsrc_type);
- return -EINVAL;
+ ret = ehca_mmap_qp(vma, qp, rsrc_type);
+ if (unlikely(ret)) {
+ ehca_err(qp->ib_qp.device,
+ "ehca_mmap_qp() failed rc=%x qp_num=%x",
+ ret, qp->ib_qp.qp_num);
+ return ret;
}
break;
@@ -316,77 +323,3 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return 0;
}
-
-int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
- struct vm_area_struct **vma)
-{
- down_write(&current->mm->mmap_sem);
- *mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS,
- foffset);
- up_write(&current->mm->mmap_sem);
- if (!(*mapped)) {
- ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
- foffset, length);
- return -EINVAL;
- }
-
- *vma = find_vma(current->mm, (u64)*mapped);
- if (!(*vma)) {
- down_write(&current->mm->mmap_sem);
- do_munmap(current->mm, 0, length);
- up_write(&current->mm->mmap_sem);
- ehca_gen_err("couldn't find vma queue=%p", *mapped);
- return -EINVAL;
- }
- (*vma)->vm_flags |= VM_RESERVED;
- (*vma)->vm_ops = &ehcau_vm_ops;
-
- return 0;
-}
-
-int ehca_mmap_register(u64 physical, void **mapped,
- struct vm_area_struct **vma)
-{
- int ret;
- unsigned long vsize;
- /* ehca hw supports only 4k page */
- ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
- if (ret) {
- ehca_gen_err("could'nt mmap physical=%lx", physical);
- return ret;
- }
-
- (*vma)->vm_flags |= VM_RESERVED;
- vsize = (*vma)->vm_end - (*vma)->vm_start;
- if (vsize != EHCA_PAGESIZE) {
- ehca_gen_err("invalid vsize=%lx",
- (*vma)->vm_end - (*vma)->vm_start);
- return -EINVAL;
- }
-
- (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
- (*vma)->vm_flags |= VM_IO | VM_RESERVED;
-
- ret = remap_pfn_range((*vma), (*vma)->vm_start,
- physical >> PAGE_SHIFT, vsize,
- (*vma)->vm_page_prot);
- if (ret) {
- ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
- return -ENOMEM;
- }
-
- return 0;
-
-}
-
-int ehca_munmap(unsigned long addr, size_t len) {
- int ret = 0;
- struct mm_struct *mm = current->mm;
- if (mm) {
- down_write(&mm->mmap_sem);
- ret = do_munmap(mm, addr, len);
- up_write(&mm->mmap_sem);
- }
- return ret;
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 46c1c89bf6ae..64f07b19349f 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -379,7 +379,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
wc.vendor_err = 0;
wc.byte_len = 0;
wc.imm_data = 0;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = 0;
wc.wc_flags = 0;
wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index ce6038743c5c..5ff20cb04494 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -702,7 +702,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
wc->vendor_err = 0;
wc->byte_len = 0;
- wc->qp_num = qp->ibqp.qp_num;
+ wc->qp = &qp->ibqp;
wc->src_qp = qp->remote_qpn;
wc->pkey_index = 0;
wc->slid = qp->remote_ah_attr.dlid;
@@ -836,7 +836,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
wc.vendor_err = 0;
wc.byte_len = wqe->length;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
@@ -951,7 +951,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
wc.vendor_err = 0;
wc.byte_len = 0;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
@@ -1511,7 +1511,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
wc.vendor_err = 0;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f7530512045d..e86cb171872e 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -137,7 +137,7 @@ bad_lkey:
wc.vendor_err = 0;
wc.byte_len = 0;
wc.imm_data = 0;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = 0;
wc.wc_flags = 0;
wc.pkey_index = 0;
@@ -336,7 +336,7 @@ again:
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
wc.vendor_err = 0;
wc.byte_len = 0;
- wc.qp_num = sqp->ibqp.qp_num;
+ wc.qp = &sqp->ibqp;
wc.src_qp = sqp->remote_qpn;
wc.pkey_index = 0;
wc.slid = sqp->remote_ah_attr.dlid;
@@ -426,7 +426,7 @@ again:
wc.status = IB_WC_SUCCESS;
wc.vendor_err = 0;
wc.byte_len = wqe->length;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
/* XXX do we know which pkey matched? Only needed for GSI. */
wc.pkey_index = 0;
@@ -447,7 +447,7 @@ send_comp:
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
wc.vendor_err = 0;
wc.byte_len = wqe->length;
- wc.qp_num = sqp->ibqp.qp_num;
+ wc.qp = &sqp->ibqp;
wc.src_qp = 0;
wc.pkey_index = 0;
wc.slid = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index e636cfd67a82..325d6634ff53 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -49,7 +49,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
wc->vendor_err = 0;
wc->byte_len = wqe->length;
- wc->qp_num = qp->ibqp.qp_num;
+ wc->qp = &qp->ibqp;
wc->src_qp = qp->remote_qpn;
wc->pkey_index = 0;
wc->slid = qp->remote_ah_attr.dlid;
@@ -411,7 +411,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
wc.vendor_err = 0;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 49f1102af8b3..9a3e54664ee4 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -66,7 +66,7 @@ bad_lkey:
wc.vendor_err = 0;
wc.byte_len = 0;
wc.imm_data = 0;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = 0;
wc.wc_flags = 0;
wc.pkey_index = 0;
@@ -255,7 +255,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
wc->status = IB_WC_SUCCESS;
wc->opcode = IB_WC_RECV;
wc->vendor_err = 0;
- wc->qp_num = qp->ibqp.qp_num;
+ wc->qp = &qp->ibqp;
wc->src_qp = sqp->ibqp.qp_num;
/* XXX do we know which pkey matched? Only needed for GSI. */
wc->pkey_index = 0;
@@ -474,7 +474,7 @@ done:
wc.vendor_err = 0;
wc.opcode = IB_WC_SEND;
wc.byte_len = len;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = 0;
wc.wc_flags = 0;
/* XXX initialize other fields? */
@@ -651,7 +651,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
wc.vendor_err = 0;
- wc.qp_num = qp->ibqp.qp_num;
+ wc.qp = &qp->ibqp;
wc.src_qp = src_qp;
/* XXX do we know which pkey matched? Only needed for GSI. */
wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 768df7265b81..968d1519761c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1854,7 +1854,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
memset(inbox + 256, 0, 256);
- MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET);
+ MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
val = in_wc->sl << 4;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1159c8a0f2c5..efd79ef109a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -534,7 +534,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
}
}
- entry->qp_num = (*cur_qp)->qpn;
+ entry->qp = &(*cur_qp)->ibqp;
if (is_send) {
wq = &(*cur_qp)->sq;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 72611fd15103..5e8ac577f0ad 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -548,6 +548,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
target->tx_head = 0;
target->tx_tail = 0;
+ target->qp_in_error = 0;
ret = srp_connect_target(target);
if (ret)
goto err;
@@ -878,6 +879,7 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
printk(KERN_ERR PFX "failed %s status %d\n",
wc.wr_id & SRP_OP_RECV ? "receive" : "send",
wc.status);
+ target->qp_in_error = 1;
break;
}
@@ -1337,6 +1339,8 @@ static int srp_abort(struct scsi_cmnd *scmnd)
printk(KERN_ERR "SRP abort called\n");
+ if (target->qp_in_error)
+ return FAILED;
if (srp_find_req(target, scmnd, &req))
return FAILED;
if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
@@ -1365,6 +1369,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
printk(KERN_ERR "SRP reset_device called\n");
+ if (target->qp_in_error)
+ return FAILED;
if (srp_find_req(target, scmnd, &req))
return FAILED;
if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
@@ -1801,6 +1807,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
goto err_free;
}
+ target->qp_in_error = 0;
ret = srp_connect_target(target);
if (ret) {
printk(KERN_ERR PFX "Connection failed\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index c21772317b86..2f3319c719a5 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -158,6 +158,7 @@ struct srp_target_port {
struct completion done;
int status;
enum srp_target_state state;
+ int qp_in_error;
};
struct srp_iu {