author    David Woodhouse <dwmw2@infradead.org>  2007-01-18 02:34:51 +0300
committer David Woodhouse <dwmw2@infradead.org>  2007-01-18 02:34:51 +0300
commit    9cdf083f981b8d37b3212400a359368661385099 (patch)
tree      aa15a6a08ad87e650dea40fb59b3180bef0d345b /drivers/infiniband
parent    e499e01d234a31d59679b7b1e1cf628d917ba49a (diff)
parent    a8b3485287731978899ced11f24628c927890e78 (diff)
download  linux-9cdf083f981b8d37b3212400a359368661385099.tar.xz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- drivers/infiniband/core/Makefile | 6
-rw-r--r-- drivers/infiniband/core/addr.c | 25
-rw-r--r-- drivers/infiniband/core/cache.c | 7
-rw-r--r-- drivers/infiniband/core/cm.c | 144
-rw-r--r-- drivers/infiniband/core/cma.c | 490
-rw-r--r-- drivers/infiniband/core/fmr_pool.c | 12
-rw-r--r-- drivers/infiniband/core/iwcm.c | 47
-rw-r--r-- drivers/infiniband/core/mad.c | 117
-rw-r--r-- drivers/infiniband/core/mad_priv.h | 8
-rw-r--r-- drivers/infiniband/core/mad_rmpp.c | 18
-rw-r--r-- drivers/infiniband/core/sa_query.c | 10
-rw-r--r-- drivers/infiniband/core/ucm.c | 20
-rw-r--r-- drivers/infiniband/core/ucma.c | 885
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 6
-rw-r--r-- drivers/infiniband/core/uverbs_marshall.c | 5
-rw-r--r-- drivers/infiniband/core/uverbs_mem.c | 19
-rw-r--r-- drivers/infiniband/hw/amso1100/c2.h | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_qp.c | 49
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_rnic.c | 4
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_vq.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_av.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_cq.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_hca.c | 8
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_iverbs.h | 4
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_main.c | 14
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_mrmw.c | 8
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_pd.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_qp.c | 28
-rw-r--r-- drivers/infiniband/hw/ehca/ipz_pt_fn.c | 13
-rw-r--r-- drivers/infiniband/hw/ehca/ipz_pt_fn.h | 15
-rw-r--r-- drivers/infiniband/hw/ipath/Makefile | 1
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_dma.c | 189
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_driver.c | 4
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_file_ops.c | 9
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_fs.c | 10
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_iba6110.c | 3
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_iba6120.c | 8
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_init_chip.c | 3
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_intr.c | 3
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_keys.c | 8
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_mr.c | 7
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_sysfs.c | 3
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_user_pages.c | 7
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_verbs.c | 3
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_verbs.h | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_av.c | 5
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_catas.c | 4
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_cq.c | 11
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_eq.c | 21
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_mad.c | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_main.c | 142
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_mcg.c | 3
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_memfree.c | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_mr.c | 5
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_pd.c | 3
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.c | 4
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_qp.c | 33
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_srq.c | 6
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 22
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 100
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 32
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 24
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 4
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.h | 4
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 30
-rw-r--r-- drivers/infiniband/ulp/iser/iser_memory.c | 130
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c | 10
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 101
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.h | 4
70 files changed, 2228 insertions, 708 deletions
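
Note: much of the core churn below is fallout from the 2.6.20 workqueue API rework. Work handlers now take a struct work_struct * instead of an opaque void *, recover their enclosing object with container_of(), and any item that is ever queued with a timeout becomes a struct delayed_work. A minimal sketch of the new-style pattern (the struct and names here are illustrative, not from this tree):

    #include <linux/workqueue.h>

    struct my_ctx {
            struct work_struct work;
            int port;
    };

    /* New-style handler: the work pointer itself locates the context. */
    static void my_handler(struct work_struct *work)
    {
            struct my_ctx *ctx = container_of(work, struct my_ctx, work);

            /* ... use ctx->port ... */
    }

    static void my_queue(struct my_ctx *ctx)
    {
            /* Old API was: INIT_WORK(&ctx->work, my_handler, ctx); */
            INIT_WORK(&ctx->work, my_handler);
            schedule_work(&ctx->work);
    }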
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 163d991eb8c9..50fb1cd447b7 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,9 +1,11 @@
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
+user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
ib_cm.o iw_cm.o $(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
+ $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o
@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o
rdma_cm-y := cma.o
+rdma_ucm-y := ucma.o
+
ib_addr-y := addr.o
ib_umad-y := user_mad.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e11187ecc931..af939796750d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -55,11 +55,11 @@ struct addr_req {
int status;
};
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -139,7 +139,7 @@ static void queue_req(struct addr_req *req)
mutex_lock(&lock);
list_for_each_entry_reverse(temp_req, &req_list, list) {
- if (time_after(req->timeout, temp_req->timeout))
+ if (time_after_eq(req->timeout, temp_req->timeout))
break;
}
@@ -215,7 +215,7 @@ out:
return ret;
}
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
struct sockaddr_in *src_in, *dst_in;
@@ -225,19 +225,17 @@ static void process_req(void *data)
mutex_lock(&lock);
list_for_each_entry_safe(req, temp_req, &req_list, list) {
- if (req->status) {
+ if (req->status == -ENODATA) {
src_in = (struct sockaddr_in *) &req->src_addr;
dst_in = (struct sockaddr_in *) &req->dst_addr;
req->status = addr_resolve_remote(src_in, dst_in,
req->addr);
+ if (req->status && time_after_eq(jiffies, req->timeout))
+ req->status = -ETIMEDOUT;
+ else if (req->status == -ENODATA)
+ continue;
}
- if (req->status && time_after(jiffies, req->timeout))
- req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA)
- continue;
-
- list_del(&req->list);
- list_add_tail(&req->list, &done_list);
+ list_move_tail(&req->list, &done_list);
}
if (!list_empty(&req_list)) {
@@ -347,8 +345,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
if (req->addr == addr) {
req->status = -ECANCELED;
req->timeout = jiffies;
- list_del(&req->list);
- list_add(&req->list, &req_list);
+ list_move(&req->list, &req_list);
set_timeout(req->timeout);
break;
}
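
The list_move() and list_move_tail() conversions in addr.c are behavior-preserving cleanups; list.h already provides the combined re-link as one primitive. The equivalence, roughly (a sketch, not code from this patch):

    #include <linux/list.h>

    /* list_move_tail(entry, head) does exactly this in one helper: */
    static inline void move_tail_open_coded(struct list_head *entry,
                                            struct list_head *head)
    {
            list_del(entry);            /* unlink from the current list */
            list_add_tail(entry, head); /* append to the destination list */
    }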
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 20e9f64e67a6..98272fbbfb31 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -285,9 +285,10 @@ err:
kfree(tprops);
}
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
{
- struct ib_update_work *work = work_ptr;
+ struct ib_update_work *work =
+ container_of(_work, struct ib_update_work, work);
ib_cache_update(work->device, work->port_num);
kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
event->event == IB_EVENT_CLIENT_REREGISTER) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
- INIT_WORK(&work->work, ib_cache_task, work);
+ INIT_WORK(&work->work, ib_cache_task);
work->device = event->device;
work->port_num = event->element.port_num;
schedule_work(&work->work);
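
Because ib_cache_event() can run in a context that must not sleep, the update is deferred through a small self-freeing work item: allocated with GFP_ATOMIC at event time, released by its own handler. A hedged sketch of that one-shot idiom (invented names):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct one_shot {
            struct work_struct work;
            struct ib_device *device;
            u8 port_num;
    };

    static void one_shot_handler(struct work_struct *w)
    {
            struct one_shot *os = container_of(w, struct one_shot, work);

            /* ... sleepable update using os->device / os->port_num ... */
            kfree(os);      /* the handler owns and frees the allocation */
    }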
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 25b1018a476c..d446998b12a4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -101,7 +101,7 @@ struct cm_av {
};
struct cm_work {
- struct work_struct work;
+ struct delayed_work work;
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
@@ -147,12 +147,12 @@ struct cm_id_private {
__be32 rq_psn;
int timeout_ms;
enum ib_mtu path_mtu;
+ __be16 pkey;
u8 private_data_len;
u8 max_cm_retries;
u8 peer_to_peer;
u8 responder_resources;
u8 initiator_depth;
- u8 local_ack_timeout;
u8 retry_count;
u8 rnr_retry_count;
u8 service_timeout;
@@ -161,7 +161,7 @@ struct cm_id_private {
atomic_t work_count;
};
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
@@ -240,11 +240,10 @@ static void * cm_copy_private_data(const void *private_data,
if (!private_data || !private_data_len)
return NULL;
- data = kmalloc(private_data_len, GFP_KERNEL);
+ data = kmemdup(private_data, private_data_len, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- memcpy(data, private_data, private_data_len);
return data;
}
@@ -669,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
return ERR_PTR(-ENOMEM);
timewait_info->work.local_id = local_id;
- INIT_WORK(&timewait_info->work.work, cm_work_handler,
- &timewait_info->work);
+ INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
return timewait_info;
}
@@ -691,7 +689,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
* timewait before notifying the user that we've exited timewait.
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
- wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+ wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
cm_id_priv->timewait_info = NULL;
@@ -1010,6 +1008,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->retry_count = param->retry_count;
cm_id_priv->path_mtu = param->primary_path->mtu;
+ cm_id_priv->pkey = param->primary_path->pkey;
cm_id_priv->qp_type = param->qp_type;
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
@@ -1024,8 +1023,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->local_ack_timeout =
- cm_req_get_primary_local_ack_timeout(req_msg);
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@ -1410,9 +1407,8 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+ cm_id_priv->pkey = req_msg->pkey;
cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
- cm_id_priv->local_ack_timeout =
- cm_req_get_primary_local_ack_timeout(req_msg);
cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@ -1716,7 +1712,7 @@ static int cm_establish_handler(struct cm_work *work)
unsigned long flags;
int ret;
- /* See comment in ib_cm_establish about lookup. */
+ /* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
if (!cm_id_priv)
return -EINVAL;
@@ -2402,11 +2398,16 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED ||
- cm_id->lap_state != IB_CM_LAP_IDLE) {
+ (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+ cm_id->lap_state != IB_CM_LAP_IDLE)) {
ret = -EINVAL;
goto out;
}
+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+ if (ret)
+ goto out;
+
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
@@ -2431,7 +2432,8 @@ out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_lap);
-static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+ struct ib_sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
memset(path, 0, sizeof *path);
@@ -2443,10 +2445,10 @@ static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
path->hop_limit = lap_msg->alt_hop_limit;
path->traffic_class = cm_lap_get_traffic_class(lap_msg);
path->reversible = 1;
- /* pkey is same as in REQ */
+ path->pkey = cm_id_priv->pkey;
path->sl = cm_lap_get_sl(lap_msg);
path->mtu_selector = IB_SA_EQ;
- /* mtu is same as in REQ */
+ path->mtu = cm_id_priv->path_mtu;
path->rate_selector = IB_SA_EQ;
path->rate = cm_lap_get_packet_rate(lap_msg);
path->packet_life_time_selector = IB_SA_EQ;
@@ -2472,7 +2474,7 @@ static int cm_lap_handler(struct cm_work *work)
param = &work->cm_event.param.lap_rcvd;
param->alternate_path = &work->path[0];
- cm_format_path_from_lap(param->alternate_path, lap_msg);
+ cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
work->cm_event.private_data = &lap_msg->private_data;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -2480,6 +2482,7 @@ static int cm_lap_handler(struct cm_work *work)
goto unlock;
switch (cm_id_priv->id.lap_state) {
+ case IB_CM_LAP_UNINIT:
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
@@ -2502,6 +2505,10 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
+ cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -2987,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
}
}
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
{
- struct cm_work *work = data;
+ struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;
switch (work->cm_event.event) {
@@ -3040,7 +3047,7 @@ static void cm_work_handler(void *data)
cm_free_work(work);
}
-int ib_cm_establish(struct ib_cm_id *cm_id)
+static int cm_establish(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
@@ -3079,16 +3086,53 @@ int ib_cm_establish(struct ib_cm_id *cm_id)
* we need to find the cm_id once we're in the context of the
* worker thread, rather than holding a reference on it.
*/
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->local_id = cm_id->local_id;
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
out:
return ret;
}
-EXPORT_SYMBOL(ib_cm_establish);
+
+static int cm_migrate(struct ib_cm_id *cm_id)
+{
+ struct cm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id->state == IB_CM_ESTABLISHED &&
+ (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+ cm_id->lap_state == IB_CM_LAP_IDLE)) {
+ cm_id->lap_state = IB_CM_LAP_IDLE;
+ cm_id_priv->av = cm_id_priv->alt_av;
+ } else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ return ret;
+}
+
+int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+{
+ int ret;
+
+ switch (event) {
+ case IB_EVENT_COMM_EST:
+ ret = cm_establish(cm_id);
+ break;
+ case IB_EVENT_PATH_MIG:
+ ret = cm_migrate(cm_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(ib_cm_notify);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
@@ -3146,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
return;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_DELAYED_WORK(&work->work, cm_work_handler);
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = (struct cm_port *)mad_agent->context;
- queue_work(cm.wq, &work->work);
+ queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3173,8 +3217,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
case IB_CM_ESTABLISHED:
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX | IB_QP_PORT;
- qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE;
+ qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
if (cm_id_priv->responder_resources)
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC;
@@ -3222,6 +3265,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
if (cm_id_priv->alt_av.ah_attr.dlid) {
*qp_attr_mask |= IB_QP_ALT_PATH;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+ qp_attr->alt_timeout =
+ cm_id_priv->alt_av.packet_life_time + 1;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
}
ret = 0;
@@ -3243,24 +3289,40 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
+ /* Allow transition to RTS before sending REP */
+ case IB_CM_REQ_RCVD:
+ case IB_CM_MRA_REQ_SENT:
+
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
case IB_CM_ESTABLISHED:
- *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
- qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
- if (cm_id_priv->qp_type == IB_QPT_RC) {
- *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
- IB_QP_RNR_RETRY |
- IB_QP_MAX_QP_RD_ATOMIC;
- qp_attr->timeout = cm_id_priv->local_ack_timeout;
- qp_attr->retry_cnt = cm_id_priv->retry_count;
- qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
- qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
- }
- if (cm_id_priv->alt_av.ah_attr.dlid) {
- *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+ *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+ qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+ if (cm_id_priv->qp_type == IB_QPT_RC) {
+ *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+ IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC;
+ qp_attr->timeout =
+ cm_id_priv->av.packet_life_time + 1;
+ qp_attr->retry_cnt = cm_id_priv->retry_count;
+ qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+ qp_attr->max_rd_atomic =
+ cm_id_priv->initiator_depth;
+ }
+ if (cm_id_priv->alt_av.ah_attr.dlid) {
+ *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+ qp_attr->path_mig_state = IB_MIG_REARM;
+ }
+ } else {
+ *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+ qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+ qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+ qp_attr->alt_timeout =
+ cm_id_priv->alt_av.packet_life_time + 1;
+ qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
qp_attr->path_mig_state = IB_MIG_REARM;
}
ret = 0;
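
With ib_cm_establish() folded into the new ib_cm_notify(), a consumer can report both early-established and path-migration QP events through one hook. A sketch of a hypothetical QP event handler feeding it (the caller is illustrative; the patch only defines the entry point):

    static void my_qp_event_handler(struct ib_event *event, void *context)
    {
            struct ib_cm_id *cm_id = context;

            switch (event->event) {
            case IB_EVENT_COMM_EST:   /* data arrived before the RTU */
            case IB_EVENT_PATH_MIG:   /* HW finished the alternate-path migration */
                    ib_cm_notify(cm_id, event->event);
                    break;
            default:
                    break;
            }
    }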
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 845090b0859c..9e0ab048c878 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
+static DEFINE_IDR(udp_ps);
struct cma_device {
struct list_head list;
@@ -133,7 +134,6 @@ struct rdma_id_private {
u32 seq_num;
u32 qp_num;
- enum ib_qp_type qp_type;
u8 srq;
};
@@ -344,7 +344,7 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
return ret;
qp_attr.qp_state = IB_QPS_INIT;
- qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ qp_attr.qp_access_flags = 0;
qp_attr.port_num = id_priv->id.port_num;
return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
IB_QP_PKEY_INDEX | IB_QP_PORT);
@@ -392,7 +392,6 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
id->qp = qp;
id_priv->qp_num = qp->qp_num;
- id_priv->qp_type = qp->qp_type;
id_priv->srq = (qp->srq != NULL);
return 0;
err:
@@ -510,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
+static inline __be16 cma_port(struct sockaddr *addr)
+{
+ if (addr->sa_family == AF_INET)
+ return ((struct sockaddr_in *) addr)->sin_port;
+ else
+ return ((struct sockaddr_in6 *) addr)->sin6_port;
+}
+
static inline int cma_any_port(struct sockaddr *addr)
{
- return !((struct sockaddr_in *) addr)->sin_port;
+ return !cma_port(addr);
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -594,20 +601,6 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
}
}
-static int cma_notify_user(struct rdma_id_private *id_priv,
- enum rdma_cm_event_type type, int status,
- void *data, u8 data_len)
-{
- struct rdma_cm_event event;
-
- event.event = type;
- event.status = status;
- event.private_data = data;
- event.private_data_len = data_len;
-
- return id_priv->id.event_handler(&id_priv->id, &event);
-}
-
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
@@ -776,63 +769,61 @@ static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
return 0;
}
-static int cma_rtu_recv(struct rdma_id_private *id_priv)
+static void cma_set_rep_event_data(struct rdma_cm_event *event,
+ struct ib_cm_rep_event_param *rep_data,
+ void *private_data)
{
- int ret;
-
- ret = cma_modify_qp_rts(&id_priv->id);
- if (ret)
- goto reject;
-
- return 0;
-reject:
- cma_modify_qp_err(&id_priv->id);
- ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
- return ret;
+ event->param.conn.private_data = private_data;
+ event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+ event->param.conn.responder_resources = rep_data->responder_resources;
+ event->param.conn.initiator_depth = rep_data->initiator_depth;
+ event->param.conn.flow_control = rep_data->flow_control;
+ event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
+ event->param.conn.srq = rep_data->srq;
+ event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- enum rdma_cm_event_type event;
- u8 private_data_len = 0;
- int ret = 0, status = 0;
+ struct rdma_cm_event event;
+ int ret = 0;
atomic_inc(&id_priv->dev_remove);
if (!cma_comp(id_priv, CMA_CONNECT))
goto out;
+ memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_REQ_ERROR:
case IB_CM_REP_ERROR:
- event = RDMA_CM_EVENT_UNREACHABLE;
- status = -ETIMEDOUT;
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
break;
case IB_CM_REP_RECEIVED:
- status = cma_verify_rep(id_priv, ib_event->private_data);
- if (status)
- event = RDMA_CM_EVENT_CONNECT_ERROR;
+ event.status = cma_verify_rep(id_priv, ib_event->private_data);
+ if (event.status)
+ event.event = RDMA_CM_EVENT_CONNECT_ERROR;
else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
- status = cma_rep_recv(id_priv);
- event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
- RDMA_CM_EVENT_ESTABLISHED;
+ event.status = cma_rep_recv(id_priv);
+ event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
+ RDMA_CM_EVENT_ESTABLISHED;
} else
- event = RDMA_CM_EVENT_CONNECT_RESPONSE;
- private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+ event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
+ cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
+ ib_event->private_data);
break;
case IB_CM_RTU_RECEIVED:
- status = cma_rtu_recv(id_priv);
- event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
- RDMA_CM_EVENT_ESTABLISHED;
+ case IB_CM_USER_ESTABLISHED:
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
case IB_CM_DREQ_ERROR:
- status = -ETIMEDOUT; /* fall through */
+ event.status = -ETIMEDOUT; /* fall through */
case IB_CM_DREQ_RECEIVED:
case IB_CM_DREP_RECEIVED:
if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
goto out;
- event = RDMA_CM_EVENT_DISCONNECTED;
+ event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
case IB_CM_TIMEWAIT_EXIT:
case IB_CM_MRA_RECEIVED:
@@ -840,9 +831,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto out;
case IB_CM_REJ_RECEIVED:
cma_modify_qp_err(&id_priv->id);
- status = ib_event->param.rej_rcvd.reason;
- event = RDMA_CM_EVENT_REJECTED;
- private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
+ event.status = ib_event->param.rej_rcvd.reason;
+ event.event = RDMA_CM_EVENT_REJECTED;
+ event.param.conn.private_data = ib_event->private_data;
+ event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
break;
default:
printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -850,8 +842,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto out;
}
- ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
- private_data_len);
+ ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
@@ -865,8 +856,8 @@ out:
return ret;
}
-static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event)
+static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
@@ -913,9 +904,61 @@ err:
return NULL;
}
+static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event)
+{
+ struct rdma_id_private *id_priv;
+ struct rdma_cm_id *id;
+ union cma_ip_addr *src, *dst;
+ __u16 port;
+ u8 ip_ver;
+ int ret;
+
+ id = rdma_create_id(listen_id->event_handler, listen_id->context,
+ listen_id->ps);
+ if (IS_ERR(id))
+ return NULL;
+
+
+ if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+ &ip_ver, &port, &src, &dst))
+ goto err;
+
+ cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+ ip_ver, port, src, dst);
+
+ ret = rdma_translate_ip(&id->route.addr.src_addr,
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto err;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ id_priv->state = CMA_CONNECT;
+ return id_priv;
+err:
+ rdma_destroy_id(id);
+ return NULL;
+}
+
+static void cma_set_req_event_data(struct rdma_cm_event *event,
+ struct ib_cm_req_event_param *req_data,
+ void *private_data, int offset)
+{
+ event->param.conn.private_data = private_data + offset;
+ event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
+ event->param.conn.responder_resources = req_data->responder_resources;
+ event->param.conn.initiator_depth = req_data->initiator_depth;
+ event->param.conn.flow_control = req_data->flow_control;
+ event->param.conn.retry_count = req_data->retry_count;
+ event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
+ event->param.conn.srq = req_data->srq;
+ event->param.conn.qp_num = req_data->remote_qpn;
+}
+
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
+ struct rdma_cm_event event;
int offset, ret;
listen_id = cm_id->context;
@@ -925,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto out;
}
- conn_id = cma_new_id(&listen_id->id, ib_event);
+ memset(&event, 0, sizeof event);
+ offset = cma_user_data_offset(listen_id->id.ps);
+ event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ if (listen_id->id.ps == RDMA_PS_UDP) {
+ conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+ event.param.ud.private_data = ib_event->private_data + offset;
+ event.param.ud.private_data_len =
+ IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
+ } else {
+ conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+ cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+ ib_event->private_data, offset);
+ }
if (!conn_id) {
ret = -ENOMEM;
goto out;
@@ -935,29 +990,25 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
mutex_lock(&lock);
ret = cma_acquire_dev(conn_id);
mutex_unlock(&lock);
- if (ret) {
- ret = -ENODEV;
- cma_exch(conn_id, CMA_DESTROYING);
- cma_release_remove(conn_id);
- rdma_destroy_id(&conn_id->id);
- goto out;
- }
+ if (ret)
+ goto release_conn_id;
conn_id->cm_id.ib = cm_id;
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
- offset = cma_user_data_offset(listen_id->id.ps);
- ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
- ib_event->private_data + offset,
- IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
- if (ret) {
- /* Destroy the CM ID by returning a non-zero value. */
- conn_id->cm_id.ib = NULL;
- cma_exch(conn_id, CMA_DESTROYING);
- cma_release_remove(conn_id);
- rdma_destroy_id(&conn_id->id);
- }
+ ret = conn_id->id.event_handler(&conn_id->id, &event);
+ if (!ret)
+ goto out;
+
+ /* Destroy the CM ID by returning a non-zero value. */
+ conn_id->cm_id.ib = NULL;
+
+release_conn_id:
+ cma_exch(conn_id, CMA_DESTROYING);
+ cma_release_remove(conn_id);
+ rdma_destroy_id(&conn_id->id);
+
out:
cma_release_remove(listen_id);
return ret;
@@ -965,8 +1016,7 @@ out:
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
- return cpu_to_be64(((u64)ps << 16) +
- be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
+ return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1022,36 +1072,49 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
- enum rdma_cm_event_type event = 0;
+ struct rdma_cm_event event;
struct sockaddr_in *sin;
int ret = 0;
+ memset(&event, 0, sizeof event);
atomic_inc(&id_priv->dev_remove);
switch (iw_event->event) {
case IW_CM_EVENT_CLOSE:
- event = RDMA_CM_EVENT_DISCONNECTED;
+ event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
case IW_CM_EVENT_CONNECT_REPLY:
sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
*sin = iw_event->local_addr;
sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
*sin = iw_event->remote_addr;
- if (iw_event->status)
- event = RDMA_CM_EVENT_REJECTED;
- else
- event = RDMA_CM_EVENT_ESTABLISHED;
+ switch (iw_event->status) {
+ case 0:
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
+ break;
+ case -ECONNRESET:
+ case -ECONNREFUSED:
+ event.event = RDMA_CM_EVENT_REJECTED;
+ break;
+ case -ETIMEDOUT:
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ break;
+ default:
+ event.event = RDMA_CM_EVENT_CONNECT_ERROR;
+ break;
+ }
break;
case IW_CM_EVENT_ESTABLISHED:
- event = RDMA_CM_EVENT_ESTABLISHED;
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
default:
BUG_ON(1);
}
- ret = cma_notify_user(id_priv, event, iw_event->status,
- iw_event->private_data,
- iw_event->private_data_len);
+ event.status = iw_event->status;
+ event.param.conn.private_data = iw_event->private_data;
+ event.param.conn.private_data_len = iw_event->private_data_len;
+ ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.iw = NULL;
@@ -1072,6 +1135,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
struct rdma_id_private *listen_id, *conn_id;
struct sockaddr_in *sin;
struct net_device *dev = NULL;
+ struct rdma_cm_event event;
int ret;
listen_id = cm_id->context;
@@ -1125,9 +1189,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
*sin = iw_event->remote_addr;
- ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
- iw_event->private_data,
- iw_event->private_data_len);
+ memset(&event, 0, sizeof event);
+ event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ event.param.conn.private_data = iw_event->private_data;
+ event.param.conn.private_data_len = iw_event->private_data_len;
+ ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret) {
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
@@ -1341,9 +1407,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
{
- struct cma_work *work = data;
+ struct cma_work *work = container_of(_work, struct cma_work, work);
struct rdma_id_private *id_priv = work->id;
int destroy = 0;
@@ -1374,7 +1440,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1431,7 +1497,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
return -ENOMEM;
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1481,19 +1547,18 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
u8 p;
mutex_lock(&lock);
+ if (list_empty(&dev_list)) {
+ ret = -ENODEV;
+ goto out;
+ }
list_for_each_entry(cma_dev, &dev_list, list)
for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
- if (!ib_query_port (cma_dev->device, p, &port_attr) &&
+ if (!ib_query_port(cma_dev->device, p, &port_attr) &&
port_attr.state == IB_PORT_ACTIVE)
goto port_found;
- if (!list_empty(&dev_list)) {
- p = 1;
- cma_dev = list_entry(dev_list.next, struct cma_device, list);
- } else {
- ret = -ENODEV;
- goto out;
- }
+ p = 1;
+ cma_dev = list_entry(dev_list.next, struct cma_device, list);
port_found:
ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -1517,8 +1582,9 @@ static void addr_handler(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *dev_addr, void *context)
{
struct rdma_id_private *id_priv = context;
- enum rdma_cm_event_type event;
+ struct rdma_cm_event event;
+ memset(&event, 0, sizeof event);
atomic_inc(&id_priv->dev_remove);
/*
@@ -1538,14 +1604,15 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (status) {
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
goto out;
- event = RDMA_CM_EVENT_ADDR_ERROR;
+ event.event = RDMA_CM_EVENT_ADDR_ERROR;
+ event.status = status;
} else {
memcpy(&id_priv->id.route.addr.src_addr, src_addr,
ip_addr_size(src_addr));
- event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
}
- if (cma_notify_user(id_priv, event, status, NULL, 0)) {
+ if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, CMA_DESTROYING);
cma_release_remove(id_priv);
cma_deref_id(id_priv);
@@ -1585,7 +1652,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
}
work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler, work);
+ INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ADDR_QUERY;
work->new_state = CMA_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@ -1735,6 +1802,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
case RDMA_PS_TCP:
ps = &tcp_ps;
break;
+ case RDMA_PS_UDP:
+ ps = &udp_ps;
+ break;
default:
return -EPROTONOSUPPORT;
}
@@ -1823,6 +1893,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
return 0;
}
+static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *ib_event)
+{
+ struct rdma_id_private *id_priv = cm_id->context;
+ struct rdma_cm_event event;
+ struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+ int ret = 0;
+
+ memset(&event, 0, sizeof event);
+ atomic_inc(&id_priv->dev_remove);
+ if (!cma_comp(id_priv, CMA_CONNECT))
+ goto out;
+
+ switch (ib_event->event) {
+ case IB_CM_SIDR_REQ_ERROR:
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
+ break;
+ case IB_CM_SIDR_REP_RECEIVED:
+ event.param.ud.private_data = ib_event->private_data;
+ event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
+ if (rep->status != IB_SIDR_SUCCESS) {
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = ib_event->param.sidr_rep_rcvd.status;
+ break;
+ }
+ if (rep->qkey != RDMA_UD_QKEY) {
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -EINVAL;
+ break;
+ }
+ ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
+ id_priv->id.route.path_rec,
+ &event.param.ud.ah_attr);
+ event.param.ud.qp_num = rep->qpn;
+ event.param.ud.qkey = rep->qkey;
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
+ event.status = 0;
+ break;
+ default:
+ printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+ ib_event->event);
+ goto out;
+ }
+
+ ret = id_priv->id.event_handler(&id_priv->id, &event);
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.ib = NULL;
+ cma_exch(id_priv, CMA_DESTROYING);
+ cma_release_remove(id_priv);
+ rdma_destroy_id(&id_priv->id);
+ return ret;
+ }
+out:
+ cma_release_remove(id_priv);
+ return ret;
+}
+
+static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+ struct rdma_conn_param *conn_param)
+{
+ struct ib_cm_sidr_req_param req;
+ struct rdma_route *route;
+ int ret;
+
+ req.private_data_len = sizeof(struct cma_hdr) +
+ conn_param->private_data_len;
+ req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+ if (!req.private_data)
+ return -ENOMEM;
+
+ if (conn_param->private_data && conn_param->private_data_len)
+ memcpy((void *) req.private_data + sizeof(struct cma_hdr),
+ conn_param->private_data, conn_param->private_data_len);
+
+ route = &id_priv->id.route;
+ ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
+ if (ret)
+ goto out;
+
+ id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
+ cma_sidr_rep_handler, id_priv);
+ if (IS_ERR(id_priv->cm_id.ib)) {
+ ret = PTR_ERR(id_priv->cm_id.ib);
+ goto out;
+ }
+
+ req.path = route->path_rec;
+ req.service_id = cma_get_service_id(id_priv->id.ps,
+ &route->addr.dst_addr);
+ req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
+ req.max_cm_retries = CMA_MAX_CM_RETRIES;
+
+ ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
+ if (ret) {
+ ib_destroy_cm_id(id_priv->cm_id.ib);
+ id_priv->cm_id.ib = NULL;
+ }
+out:
+ kfree(req.private_data);
+ return ret;
+}
+
static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
@@ -1862,7 +2036,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.service_id = cma_get_service_id(id_priv->id.ps,
&route->addr.dst_addr);
req.qp_num = id_priv->qp_num;
- req.qp_type = id_priv->qp_type;
+ req.qp_type = IB_QPT_RC;
req.starting_psn = id_priv->seq_num;
req.responder_resources = conn_param->responder_resources;
req.initiator_depth = conn_param->initiator_depth;
@@ -1939,13 +2113,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
if (!id->qp) {
id_priv->qp_num = conn_param->qp_num;
- id_priv->qp_type = conn_param->qp_type;
id_priv->srq = conn_param->srq;
}
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = cma_connect_ib(id_priv, conn_param);
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_resolve_ib_udp(id_priv, conn_param);
+ else
+ ret = cma_connect_ib(id_priv, conn_param);
break;
case RDMA_TRANSPORT_IWARP:
ret = cma_connect_iw(id_priv, conn_param);
@@ -1968,11 +2144,25 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
struct ib_cm_rep_param rep;
- int ret;
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
- ret = cma_modify_qp_rtr(&id_priv->id);
- if (ret)
- return ret;
+ if (id_priv->id.qp) {
+ ret = cma_modify_qp_rtr(&id_priv->id);
+ if (ret)
+ goto out;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
+ &qp_attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_rd_atomic = conn_param->initiator_depth;
+ ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+ if (ret)
+ goto out;
+ }
memset(&rep, 0, sizeof rep);
rep.qp_num = id_priv->qp_num;
@@ -1987,7 +2177,9 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.rnr_retry_count = conn_param->rnr_retry_count;
rep.srq = id_priv->srq ? 1 : 0;
- return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+ ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+out:
+ return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
@@ -2012,6 +2204,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
+static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+ enum ib_cm_sidr_status status,
+ const void *private_data, int private_data_len)
+{
+ struct ib_cm_sidr_rep_param rep;
+
+ memset(&rep, 0, sizeof rep);
+ rep.status = status;
+ if (status == IB_SIDR_SUCCESS) {
+ rep.qp_num = id_priv->qp_num;
+ rep.qkey = RDMA_UD_QKEY;
+ }
+ rep.private_data = private_data;
+ rep.private_data_len = private_data_len;
+
+ return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
+}
+
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
struct rdma_id_private *id_priv;
@@ -2023,13 +2233,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
if (!id->qp && conn_param) {
id_priv->qp_num = conn_param->qp_num;
- id_priv->qp_type = conn_param->qp_type;
id_priv->srq = conn_param->srq;
}
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (conn_param)
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+ conn_param->private_data,
+ conn_param->private_data_len);
+ else if (conn_param)
ret = cma_accept_ib(id_priv, conn_param);
else
ret = cma_rep_recv(id_priv);
@@ -2053,6 +2266,27 @@ reject:
}
EXPORT_SYMBOL(rdma_accept);
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
+{
+ struct rdma_id_private *id_priv;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (!cma_comp(id_priv, CMA_CONNECT))
+ return -EINVAL;
+
+ switch (id->device->node_type) {
+ case RDMA_NODE_IB_CA:
+ ret = ib_cm_notify(id_priv->cm_id.ib, event);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rdma_notify);
+
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
u8 private_data_len)
{
@@ -2065,9 +2299,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = ib_send_cm_rej(id_priv->cm_id.ib,
- IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
- private_data, private_data_len);
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+ private_data, private_data_len);
+ else
+ ret = ib_send_cm_rej(id_priv->cm_id.ib,
+ IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, private_data, private_data_len);
break;
case RDMA_TRANSPORT_IWARP:
ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2123,8 +2361,6 @@ static void cma_add_one(struct ib_device *device)
cma_dev->device = device;
cma_dev->node_guid = device->node_guid;
- if (!cma_dev->node_guid)
- goto err;
init_completion(&cma_dev->comp);
atomic_set(&cma_dev->refcount, 1);
@@ -2136,13 +2372,11 @@ static void cma_add_one(struct ib_device *device)
list_for_each_entry(id_priv, &listen_any_list, list)
cma_listen_on_dev(id_priv, cma_dev);
mutex_unlock(&lock);
- return;
-err:
- kfree(cma_dev);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
+ struct rdma_cm_event event;
enum cma_state state;
/* Record that we want to remove the device */
@@ -2157,8 +2391,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
return 0;
- return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
- 0, NULL, 0);
+ memset(&event, 0, sizeof event);
+ event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
+ return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_process_remove(struct cma_device *cma_dev)
@@ -2240,6 +2475,7 @@ static void cma_cleanup(void)
destroy_workqueue(cma_wq);
idr_destroy(&sdp_ps);
idr_destroy(&tcp_ps);
+ idr_destroy(&udp_ps);
}
module_init(cma_init);
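
The RDMA_PS_UDP support routes rdma_connect() through the CM's Service ID Resolution (SIDR) exchange instead of the full REQ/REP handshake: cma_resolve_ib_udp() issues the SIDR REQ, and cma_sidr_rep_handler() turns the reply into an ESTABLISHED event whose param.ud carries the remote qp_num, qkey, and a prebuilt ah_attr. From the consumer's side only the port space changes; a minimal sketch, assuming the usual resolve/connect sequence:

    #include <rdma/rdma_cm.h>

    /* Hypothetical helper: a UD-style cm_id over the new port space. */
    static struct rdma_cm_id *create_ud_id(rdma_cm_event_handler handler,
                                           void *context)
    {
            /* rdma_resolve_addr()/rdma_resolve_route()/rdma_connect()
             * then run as for RDMA_PS_TCP, but resolved over SIDR. */
            return rdma_create_id(handler, context, RDMA_PS_UDP);
    }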
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 86a3b2d401db..8926a2bd4a87 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -394,20 +394,12 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
*/
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
- int serial;
-
- atomic_inc(&pool->req_ser);
- /*
- * It's OK if someone else bumps req_ser again here -- we'll
- * just wait a little longer.
- */
- serial = atomic_read(&pool->req_ser);
+ int serial = atomic_inc_return(&pool->req_ser);
wake_up_process(pool->thread);
if (wait_event_interruptible(pool->force_wait,
- atomic_read(&pool->flush_ser) -
- atomic_read(&pool->req_ser) >= 0))
+ atomic_read(&pool->flush_ser) - serial >= 0))
return -EINTR;
return 0;
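
The ib_flush_fmr_pool() change also fixes a subtle race: the old code re-read req_ser inside the wait condition, so concurrent callers bumping it could make a flusher wait on requests it never issued. Drawing the ticket once with atomic_inc_return() makes the condition local. The idiom, sketched with illustrative names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    /* Wait until the worker's flush counter passes this caller's ticket,
     * ignoring tickets drawn later by other callers. */
    static int wait_for_my_flush(atomic_t *req_ser, atomic_t *flush_ser,
                                 wait_queue_head_t *wq)
    {
            int serial = atomic_inc_return(req_ser);    /* draw a ticket */

            return wait_event_interruptible(*wq,
                            atomic_read(flush_ser) - serial >= 0);
    }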
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c3fb304a4e86..1039ad57d53b 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ struct iwcm_work {
* 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
* the backlog is exceeded, then no more connection request events will
* be processed. cm_event_handler() returns -ENOMEM in this case. Its up
- * to the provider to reject the connectino request.
+ * to the provider to reject the connection request.
* 2) in the connection request workqueue handler, cm_conn_req_handler().
* If work elements cannot be allocated for the new connect request cm_id,
* then IWCM will call the provider reject method. This is ok since
@@ -131,26 +131,25 @@ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
}
/*
- * Save private data from incoming connection requests in the
- * cm_id_priv so the low level driver doesn't have to. Adjust
+ * Save private data from incoming connection requests to
+ * iw_cm_event, so the low level driver doesn't have to. Adjust
* the event ptr to point to the local copy.
*/
-static int copy_private_data(struct iwcm_id_private *cm_id_priv,
- struct iw_cm_event *event)
+static int copy_private_data(struct iw_cm_event *event)
{
void *p;
- p = kmalloc(event->private_data_len, GFP_ATOMIC);
+ p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
if (!p)
return -ENOMEM;
- memcpy(p, event->private_data, event->private_data_len);
event->private_data = p;
return 0;
}
/*
- * Release a reference on cm_id. If the last reference is being removed
- * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ * Release a reference on cm_id. If the last reference is being
+ * released, enable the waiting thread (in iw_destroy_cm_id) to
+ * get woken up, and return 1 if a thread is already waiting.
*/
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
@@ -243,7 +242,7 @@ static int iwcm_modify_qp_sqd(struct ib_qp *qp)
/*
* CM_ID <-- CLOSING
*
- * Block if a passive or active connection is currenlty being processed. Then
+ * Block if a passive or active connection is currently being processed. Then
* process the event as follows:
* - If we are ESTABLISHED, move to CLOSING and modify the QP state
* based on the abrupt flag
@@ -408,7 +407,7 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
struct iwcm_id_private *cm_id_priv;
unsigned long flags;
- int ret = 0;
+ int ret;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -535,7 +534,7 @@ EXPORT_SYMBOL(iw_cm_accept);
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
struct iwcm_id_private *cm_id_priv;
- int ret = 0;
+ int ret;
unsigned long flags;
struct ib_qp *qp;
@@ -620,7 +619,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
spin_lock_irqsave(&listen_id_priv->lock, flags);
if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
spin_unlock_irqrestore(&listen_id_priv->lock, flags);
- return;
+ goto out;
}
spin_unlock_irqrestore(&listen_id_priv->lock, flags);
@@ -629,7 +628,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
listen_id_priv->id.context);
/* If the cm_id could not be created, ignore the request */
if (IS_ERR(cm_id))
- return;
+ goto out;
cm_id->provider_data = iw_event->provider_data;
cm_id->local_addr = iw_event->local_addr;
@@ -642,7 +641,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
if (ret) {
iw_cm_reject(cm_id, NULL, 0);
iw_destroy_cm_id(cm_id);
- return;
+ goto out;
}
/* Call the client CM handler */
@@ -654,6 +653,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
kfree(cm_id);
}
+out:
if (iw_event->private_data_len)
kfree(iw_event->private_data);
}
@@ -674,7 +674,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -704,7 +704,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
unsigned long flags;
- int ret = 0;
+ int ret;
spin_lock_irqsave(&cm_id_priv->lock, flags);
/*
@@ -828,9 +828,10 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
* thread asleep on the destroy_comp list vs. an object destroyed
* here synchronously when the last reference is removed.
*/
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
{
- struct iwcm_work *work = arg, lwork;
+ struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+ struct iw_cm_event levent;
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
int empty;
@@ -843,11 +844,11 @@ static void cm_work_handler(void *arg)
struct iwcm_work, list);
list_del_init(&work->list);
empty = list_empty(&cm_id_priv->work_list);
- lwork = *work;
+ levent = work->event;
put_work(work);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = process_event(cm_id_priv, &work->event);
+ ret = process_event(cm_id_priv, &levent);
if (ret) {
set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
destroy_cm_id(&cm_id_priv->id);
@@ -899,14 +900,14 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
goto out;
}
- INIT_WORK(&work->work, cm_work_handler, work);
+ INIT_WORK(&work->work, cm_work_handler);
work->cm_id = cm_id_priv;
work->event = *iw_event;
if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
work->event.private_data_len) {
- ret = copy_private_data(cm_id_priv, &work->event);
+ ret = copy_private_data(&work->event);
if (ret) {
put_work(work);
goto out;
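
Two independent improvements in iwcm.c: kmemdup() replaces the open-coded kmalloc()+memcpy() (the same substitution made in cm.c above), and cm_work_handler() now copies the event into a local (levent) before put_work() recycles the work element, closing a use-after-free on work->event. The kmemdup() equivalence, roughly:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* kmemdup(src, len, gfp) behaves like this open-coded version: */
    static void *kmemdup_open_coded(const void *src, size_t len, gfp_t gfp)
    {
            void *p = kmalloc(len, gfp);

            if (p)
                    memcpy(p, src, len);
            return p;
    }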
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a72bcea46ff6..5ed141ebd1c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
-static kmem_cache_t *ib_mad_cache;
+static struct kmem_cache *ib_mad_cache;
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
struct ib_mad_agent_private *agent_priv,
u8 mgmt_class);
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
INIT_LIST_HEAD(&mad_agent_priv->wait_list);
INIT_LIST_HEAD(&mad_agent_priv->done_list);
INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
- INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+ INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
INIT_LIST_HEAD(&mad_agent_priv->local_list);
- INIT_WORK(&mad_agent_priv->local_work, local_completions,
- mad_agent_priv);
+ INIT_WORK(&mad_agent_priv->local_work, local_completions);
atomic_set(&mad_agent_priv->refcount, 1);
init_completion(&mad_agent_priv->comp);
@@ -999,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_agent = mad_send_wr->send_buf.mad_agent;
sge = mad_send_wr->sg_list;
- sge[0].addr = dma_map_single(mad_agent->device->dma_device,
- mad_send_wr->send_buf.mad,
- sge[0].length,
- DMA_TO_DEVICE);
- pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
-
- sge[1].addr = dma_map_single(mad_agent->device->dma_device,
- ib_get_payload(mad_send_wr),
- sge[1].length,
- DMA_TO_DEVICE);
- pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+ sge[0].addr = ib_dma_map_single(mad_agent->device,
+ mad_send_wr->send_buf.mad,
+ sge[0].length,
+ DMA_TO_DEVICE);
+ mad_send_wr->header_mapping = sge[0].addr;
+
+ sge[1].addr = ib_dma_map_single(mad_agent->device,
+ ib_get_payload(mad_send_wr),
+ sge[1].length,
+ DMA_TO_DEVICE);
+ mad_send_wr->payload_mapping = sge[1].addr;
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1027,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
}
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
if (ret) {
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- sge[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- sge[1].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->header_mapping,
+ sge[0].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->payload_mapping,
+ sge[1].length, DMA_TO_DEVICE);
}
return ret;
}
@@ -1851,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
mad_list);
recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
- dma_unmap_single(port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(port_priv->device,
+ recv->header.mapping,
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
/* Setup MAD receive work completion from "normal" work completion */
recv->header.wc = *wc;
@@ -2081,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
qp_info = send_queue->qp_info;
retry:
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+ mad_send_wr->header_mapping,
+ mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+ mad_send_wr->payload_mapping,
+ mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
queued_send_wr = NULL;
spin_lock_irqsave(&send_queue->lock, flags);
list_del(&mad_list->list);
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
/*
* IB MAD completion callback
*/
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
{
struct ib_mad_port_private *port_priv;
struct ib_wc wc;
- port_priv = (struct ib_mad_port_private *)data;
+ port_priv = container_of(work, struct ib_mad_port_private, work);
ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
}
EXPORT_SYMBOL(ib_cancel_mad);
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
struct ib_wc wc;
struct ib_mad_send_wc mad_send_wc;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv =
+ container_of(work, struct ib_mad_agent_private, local_work);
spin_lock_irqsave(&mad_agent_priv->lock, flags);
while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
return ret;
}
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags, delay;
- mad_agent_priv = (struct ib_mad_agent_private *)data;
+ mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+ timed_work.work);
mad_send_wc.vendor_err = 0;
spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2527,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
break;
}
}
- sg_list.addr = dma_map_single(qp_info->port_priv->
- device->dma_device,
- &mad_priv->grh,
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
- pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+ sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+ &mad_priv->grh,
+ sizeof *mad_priv -
+ sizeof mad_priv->header,
+ DMA_FROM_DEVICE);
+ mad_priv->header.mapping = sg_list.addr;
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
@@ -2548,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&mad_priv->header,
- mapping),
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ mad_priv->header.mapping,
+ sizeof *mad_priv -
+ sizeof mad_priv->header,
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, mad_priv);
printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
break;
@@ -2585,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
/* Remove from posted receive MAD list */
list_del(&mad_list->list);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ recv->header.mapping,
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, recv);
}
@@ -2799,7 +2798,7 @@ static int ib_mad_port_open(struct ib_device *device,
ret = -ENOMEM;
goto error8;
}
- INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+ INIT_WORK(&port_priv->work, ib_mad_completion_handler);
spin_lock_irqsave(&ib_mad_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_mad_port_list);
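
The mad.c hunks above fold together two independent migrations: DMA mapping now goes through the device-keyed ib_dma_*() wrappers instead of dma_map_single() on dev->dma_device, and work handlers follow the 2.6.20 workqueue API, which passes the struct work_struct itself rather than a void * and lets the handler recover its context with container_of(). A minimal sketch of the new work idiom, with illustrative names (my_ctx and my_handler are not kernel symbols):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_ctx {
		struct work_struct work;
		int value;
	};

	static void my_handler(struct work_struct *work)
	{
		/* recover the enclosing object; there is no data argument */
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		printk(KERN_INFO "value=%d\n", ctx->value);
	}

	static void my_submit(struct my_ctx *ctx)
	{
		INIT_WORK(&ctx->work, my_handler);	/* handler only */
		schedule_work(&ctx->work);
	}
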
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d06b59083f6e..de89717f49fe 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
struct ib_mad_list_head mad_list;
struct ib_mad_recv_wc recv_wc;
struct ib_wc wc;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ u64 mapping;
} __attribute__ ((packed));
struct ib_mad_private {
@@ -102,7 +102,7 @@ struct ib_mad_agent_private {
struct list_head send_list;
struct list_head wait_list;
struct list_head done_list;
- struct work_struct timed_work;
+ struct delayed_work timed_work;
unsigned long timeout;
struct list_head local_list;
struct work_struct local_work;
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
struct list_head agent_list;
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_buf send_buf;
- DECLARE_PCI_UNMAP_ADDR(header_mapping)
- DECLARE_PCI_UNMAP_ADDR(payload_mapping)
+ u64 header_mapping;
+ u64 payload_mapping;
struct ib_send_wr send_wr;
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
__be64 tid;
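
With the ib_dma_*() API a mapping handle is always a plain u64 (for software devices such as ipath, below, it may even be a kernel virtual address), so the conditionally empty DECLARE_PCI_UNMAP_ADDR() slots become unconditional u64 fields. A sketch of storing and checking such a handle; struct foo and foo_map() are illustrative only:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <rdma/ib_verbs.h>

	struct foo {
		u64 mapping;
	};

	static int foo_map(struct ib_device *dev, struct foo *f,
			   void *buf, size_t len)
	{
		f->mapping = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(dev, f->mapping))
			return -ENOMEM;
		return 0;
	}
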
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 1ef79d015a1e..3663fd7022be 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -45,8 +45,8 @@ enum rmpp_state {
struct mad_rmpp_recv {
struct ib_mad_agent_private *agent;
struct list_head list;
- struct work_struct timeout_work;
- struct work_struct cleanup_work;
+ struct delayed_work timeout_work;
+ struct delayed_work cleanup_work;
struct completion comp;
enum rmpp_state state;
spinlock_t lock;
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent,
}
}
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
{
- struct mad_rmpp_recv *rmpp_recv = data;
+ struct mad_rmpp_recv *rmpp_recv =
+ container_of(work, struct mad_rmpp_recv, timeout_work.work);
struct ib_mad_recv_wc *rmpp_wc;
unsigned long flags;
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data)
ib_free_recv_mad(rmpp_wc);
}
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
{
- struct mad_rmpp_recv *rmpp_recv = data;
+ struct mad_rmpp_recv *rmpp_recv =
+ container_of(work, struct mad_rmpp_recv, cleanup_work.work);
unsigned long flags;
spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
rmpp_recv->agent = agent;
init_completion(&rmpp_recv->comp);
- INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
- INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+ INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+ INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
spin_lock_init(&rmpp_recv->lock);
rmpp_recv->state = RMPP_STATE_ACTIVE;
atomic_set(&rmpp_recv->refcount, 1);
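
timeout_work and cleanup_work become struct delayed_work because they are armed with a delay; under the new API the workqueue core hands the handler a pointer to the embedded work member, which is why the container_of() calls above name timeout_work.work rather than timeout_work. A minimal sketch (struct rcv is an illustrative stand-in):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct rcv {
		struct delayed_work timeout_work;
	};

	static void rcv_timeout(struct work_struct *work)
	{
		/* the core passes &timeout_work.work, one member deeper */
		struct rcv *r = container_of(work, struct rcv,
					     timeout_work.work);
		(void) r;
	}

	static void rcv_arm(struct rcv *r, unsigned long delay)
	{
		INIT_DELAYED_WORK(&r->timeout_work, rcv_timeout);
		schedule_delayed_work(&r->timeout_work, delay);
	}
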
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1706d3c7e95e..e45afba75341 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref)
kfree(sm_ah);
}
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
{
- struct ib_sa_port *port = port_ptr;
+ struct ib_sa_port *port =
+ container_of(work, struct ib_sa_port, update_task);
struct ib_sa_sm_ah *new_ah, *old_ah;
struct ib_port_attr port_attr;
struct ib_ah_attr ah_attr;
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device)
if (IS_ERR(sa_dev->port[i].agent))
goto err;
- INIT_WORK(&sa_dev->port[i].update_task,
- update_sm_ah, &sa_dev->port[i]);
+ INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
}
ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
goto err;
for (i = 0; i <= e - s; ++i)
- update_sm_ah(&sa_dev->port[i]);
+ update_sm_ah(&sa_dev->port[i].update_task);
return;
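
The last hunk shows a side effect of the new prototype: to run update_sm_ah() synchronously at setup time, the caller passes the embedded work_struct itself, and the handler locates the port exactly as it would on the workqueue path. A compact sketch of the pattern, with struct port standing in for the driver's private type:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct port {
		struct work_struct update_task;
	};

	static void update(struct work_struct *work)
	{
		struct port *port = container_of(work, struct port,
						 update_task);
		(void) port;
	}

	static void port_init(struct port *port)
	{
		INIT_WORK(&port->update_task, update);
		update(&port->update_task);	/* direct call with the same
						 * argument the core would pass */
	}
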
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index ad4f4d5c2924..f15220a0ee75 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -161,12 +161,14 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
struct ib_ucm_event, ctx_list);
list_del(&uevent->file_list);
list_del(&uevent->ctx_list);
+ mutex_unlock(&ctx->file->file_mutex);
/* clear incoming connections. */
if (ib_ucm_new_cm_id(uevent->resp.event))
ib_destroy_cm_id(uevent->cm_id);
kfree(uevent);
+ mutex_lock(&ctx->file->file_mutex);
}
mutex_unlock(&ctx->file->file_mutex);
}
@@ -328,20 +330,18 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
}
if (uvt->data_len) {
- uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
+ uvt->data = kmemdup(evt->private_data, uvt->data_len, GFP_KERNEL);
if (!uvt->data)
goto err1;
- memcpy(uvt->data, evt->private_data, uvt->data_len);
uvt->resp.present |= IB_UCM_PRES_DATA;
}
if (uvt->info_len) {
- uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
+ uvt->info = kmemdup(info, uvt->info_len, GFP_KERNEL);
if (!uvt->info)
goto err2;
- memcpy(uvt->info, info, uvt->info_len);
uvt->resp.present |= IB_UCM_PRES_INFO;
}
return 0;
@@ -685,11 +685,11 @@ out:
return result;
}
-static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
- const char __user *inbuf,
- int in_len, int out_len)
+static ssize_t ib_ucm_notify(struct ib_ucm_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
{
- struct ib_ucm_establish cmd;
+ struct ib_ucm_notify cmd;
struct ib_ucm_context *ctx;
int result;
@@ -700,7 +700,7 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- result = ib_cm_establish(ctx->cm_id);
+ result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
ib_ucm_ctx_put(ctx);
return result;
}
@@ -1107,7 +1107,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
[IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
[IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
[IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
- [IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish,
+ [IB_USER_CM_CMD_NOTIFY] = ib_ucm_notify,
[IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
[IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
[IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
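
Two separate cleanups appear above: file_mutex is dropped around ib_destroy_cm_id() so a potentially blocking destroy never runs under the lock, and the kmalloc()+memcpy() pairs collapse into kmemdup(). A sketch of the latter, assuming process context (dup_private_data is an illustrative name):

	#include <linux/slab.h>
	#include <linux/string.h>

	static void *dup_private_data(const void *src, size_t len)
	{
		/* kmemdup() = kmalloc() followed by memcpy() */
		return kmemdup(src, len, GFP_KERNEL);	/* NULL on failure */
	}
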
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
new file mode 100644
index 000000000000..e2e8d329b443
--- /dev/null
+++ b/drivers/infiniband/core/ucma.c
@@ -0,0 +1,885 @@
+/*
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/miscdevice.h>
+
+#include <rdma/rdma_user_cm.h>
+#include <rdma/ib_marshall.h>
+#include <rdma/rdma_cm.h>
+
+MODULE_AUTHOR("Sean Hefty");
+MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
+MODULE_LICENSE("Dual BSD/GPL");
+
+enum {
+ UCMA_MAX_BACKLOG = 128
+};
+
+struct ucma_file {
+ struct mutex mut;
+ struct file *filp;
+ struct list_head ctx_list;
+ struct list_head event_list;
+ wait_queue_head_t poll_wait;
+};
+
+struct ucma_context {
+ int id;
+ struct completion comp;
+ atomic_t ref;
+ int events_reported;
+ int backlog;
+
+ struct ucma_file *file;
+ struct rdma_cm_id *cm_id;
+ u64 uid;
+
+ struct list_head list;
+};
+
+struct ucma_event {
+ struct ucma_context *ctx;
+ struct list_head list;
+ struct rdma_cm_id *cm_id;
+ struct rdma_ucm_event_resp resp;
+};
+
+static DEFINE_MUTEX(mut);
+static DEFINE_IDR(ctx_idr);
+
+static inline struct ucma_context *_ucma_find_context(int id,
+ struct ucma_file *file)
+{
+ struct ucma_context *ctx;
+
+ ctx = idr_find(&ctx_idr, id);
+ if (!ctx)
+ ctx = ERR_PTR(-ENOENT);
+ else if (ctx->file != file)
+ ctx = ERR_PTR(-EINVAL);
+ return ctx;
+}
+
+static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
+{
+ struct ucma_context *ctx;
+
+ mutex_lock(&mut);
+ ctx = _ucma_find_context(id, file);
+ if (!IS_ERR(ctx))
+ atomic_inc(&ctx->ref);
+ mutex_unlock(&mut);
+ return ctx;
+}
+
+static void ucma_put_ctx(struct ucma_context *ctx)
+{
+ if (atomic_dec_and_test(&ctx->ref))
+ complete(&ctx->comp);
+}
+
+static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
+{
+ struct ucma_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ atomic_set(&ctx->ref, 1);
+ init_completion(&ctx->comp);
+ ctx->file = file;
+
+ do {
+ ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
+ if (!ret)
+ goto error;
+
+ mutex_lock(&mut);
+ ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
+ mutex_unlock(&mut);
+ } while (ret == -EAGAIN);
+
+ if (ret)
+ goto error;
+
+ list_add_tail(&ctx->list, &file->ctx_list);
+ return ctx;
+
+error:
+ kfree(ctx);
+ return NULL;
+}
+
+static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
+ struct rdma_conn_param *src)
+{
+ if (src->private_data_len)
+ memcpy(dst->private_data, src->private_data,
+ src->private_data_len);
+ dst->private_data_len = src->private_data_len;
+ dst->responder_resources = src->responder_resources;
+ dst->initiator_depth = src->initiator_depth;
+ dst->flow_control = src->flow_control;
+ dst->retry_count = src->retry_count;
+ dst->rnr_retry_count = src->rnr_retry_count;
+ dst->srq = src->srq;
+ dst->qp_num = src->qp_num;
+}
+
+static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
+ struct rdma_ud_param *src)
+{
+ if (src->private_data_len)
+ memcpy(dst->private_data, src->private_data,
+ src->private_data_len);
+ dst->private_data_len = src->private_data_len;
+ ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
+ dst->qp_num = src->qp_num;
+ dst->qkey = src->qkey;
+}
+
+static void ucma_set_event_context(struct ucma_context *ctx,
+ struct rdma_cm_event *event,
+ struct ucma_event *uevent)
+{
+ uevent->ctx = ctx;
+ uevent->resp.uid = ctx->uid;
+ uevent->resp.id = ctx->id;
+}
+
+static int ucma_event_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct ucma_event *uevent;
+ struct ucma_context *ctx = cm_id->context;
+ int ret = 0;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent)
+ return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
+
+ uevent->cm_id = cm_id;
+ ucma_set_event_context(ctx, event, uevent);
+ uevent->resp.event = event->event;
+ uevent->resp.status = event->status;
+ if (cm_id->ps == RDMA_PS_UDP)
+ ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
+ else
+ ucma_copy_conn_event(&uevent->resp.param.conn,
+ &event->param.conn);
+
+ mutex_lock(&ctx->file->mut);
+ if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
+ if (!ctx->backlog) {
+ ret = -EDQUOT;
+ kfree(uevent);
+ goto out;
+ }
+ ctx->backlog--;
+ } else if (!ctx->uid) {
+ /*
+ * We ignore events for new connections until userspace has set
+ * their context. This can only happen if an error occurs on a
+ * new connection before the user accepts it. This is okay,
+ * since the accept will just fail later.
+ */
+ kfree(uevent);
+ goto out;
+ }
+
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ wake_up_interruptible(&ctx->file->poll_wait);
+out:
+ mutex_unlock(&ctx->file->mut);
+ return ret;
+}
+
+static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct ucma_context *ctx;
+ struct rdma_ucm_get_event cmd;
+ struct ucma_event *uevent;
+ int ret = 0;
+ DEFINE_WAIT(wait);
+
+ if (out_len < sizeof uevent->resp)
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ mutex_lock(&file->mut);
+ while (list_empty(&file->event_list)) {
+ if (file->filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
+ mutex_unlock(&file->mut);
+ schedule();
+ mutex_lock(&file->mut);
+ finish_wait(&file->poll_wait, &wait);
+ }
+
+ if (ret)
+ goto done;
+
+ uevent = list_entry(file->event_list.next, struct ucma_event, list);
+
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
+ ctx = ucma_alloc_ctx(file);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ uevent->ctx->backlog++;
+ ctx->cm_id = uevent->cm_id;
+ ctx->cm_id->context = ctx;
+ uevent->resp.id = ctx->id;
+ }
+
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &uevent->resp, sizeof uevent->resp)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ list_del(&uevent->list);
+ uevent->ctx->events_reported++;
+ kfree(uevent);
+done:
+ mutex_unlock(&file->mut);
+ return ret;
+}
+
+static ssize_t ucma_create_id(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_create_id cmd;
+ struct rdma_ucm_create_id_resp resp;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ mutex_lock(&file->mut);
+ ctx = ucma_alloc_ctx(file);
+ mutex_unlock(&file->mut);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->uid = cmd.uid;
+ ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
+ if (IS_ERR(ctx->cm_id)) {
+ ret = PTR_ERR(ctx->cm_id);
+ goto err1;
+ }
+
+ resp.id = ctx->id;
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp))) {
+ ret = -EFAULT;
+ goto err2;
+ }
+ return 0;
+
+err2:
+ rdma_destroy_id(ctx->cm_id);
+err1:
+ mutex_lock(&mut);
+ idr_remove(&ctx_idr, ctx->id);
+ mutex_unlock(&mut);
+ kfree(ctx);
+ return ret;
+}
+
+static void ucma_cleanup_events(struct ucma_context *ctx)
+{
+ struct ucma_event *uevent, *tmp;
+
+ list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
+ if (uevent->ctx != ctx)
+ continue;
+
+ list_del(&uevent->list);
+
+ /* clear incoming connections. */
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+ rdma_destroy_id(uevent->cm_id);
+
+ kfree(uevent);
+ }
+}
+
+static int ucma_free_ctx(struct ucma_context *ctx)
+{
+ int events_reported;
+
+ /* No new events will be generated after destroying the id. */
+ rdma_destroy_id(ctx->cm_id);
+
+ /* Cleanup events not yet reported to the user. */
+ mutex_lock(&ctx->file->mut);
+ ucma_cleanup_events(ctx);
+ list_del(&ctx->list);
+ mutex_unlock(&ctx->file->mut);
+
+ events_reported = ctx->events_reported;
+ kfree(ctx);
+ return events_reported;
+}
+
+static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_destroy_id cmd;
+ struct rdma_ucm_destroy_id_resp resp;
+ struct ucma_context *ctx;
+ int ret = 0;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ mutex_lock(&mut);
+ ctx = _ucma_find_context(cmd.id, file);
+ if (!IS_ERR(ctx))
+ idr_remove(&ctx_idr, ctx->id);
+ mutex_unlock(&mut);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
+ resp.events_reported = ucma_free_ctx(ctx);
+
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_bind_addr cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_resolve_addr(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_resolve_addr cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr,
+ cmd.timeout_ms);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_resolve_route(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_resolve_route cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
+ struct rdma_route *route)
+{
+ struct rdma_dev_addr *dev_addr;
+
+ resp->num_paths = route->num_paths;
+ switch (route->num_paths) {
+ case 0:
+ dev_addr = &route->addr.dev_addr;
+ ib_addr_get_dgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].dgid);
+ ib_addr_get_sgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].sgid);
+ resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
+ break;
+ case 2:
+ ib_copy_path_rec_to_user(&resp->ib_route[1],
+ &route->path_rec[1]);
+ /* fall through */
+ case 1:
+ ib_copy_path_rec_to_user(&resp->ib_route[0],
+ &route->path_rec[0]);
+ break;
+ default:
+ break;
+ }
+}
+
+static ssize_t ucma_query_route(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_query_route cmd;
+ struct rdma_ucm_query_route_resp resp;
+ struct ucma_context *ctx;
+ struct sockaddr *addr;
+ int ret = 0;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ memset(&resp, 0, sizeof resp);
+ addr = &ctx->cm_id->route.addr.src_addr;
+ memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
+ sizeof(struct sockaddr_in) :
+ sizeof(struct sockaddr_in6));
+ addr = &ctx->cm_id->route.addr.dst_addr;
+ memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
+ sizeof(struct sockaddr_in) :
+ sizeof(struct sockaddr_in6));
+ if (!ctx->cm_id->device)
+ goto out;
+
+ resp.node_guid = ctx->cm_id->device->node_guid;
+ resp.port_num = ctx->cm_id->port_num;
+ switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static void ucma_copy_conn_param(struct rdma_conn_param *dst,
+ struct rdma_ucm_conn_param *src)
+{
+ dst->private_data = src->private_data;
+ dst->private_data_len = src->private_data_len;
+ dst->responder_resources = src->responder_resources;
+ dst->initiator_depth = src->initiator_depth;
+ dst->flow_control = src->flow_control;
+ dst->retry_count = src->retry_count;
+ dst->rnr_retry_count = src->rnr_retry_count;
+ dst->srq = src->srq;
+ dst->qp_num = src->qp_num;
+}
+
+static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_connect cmd;
+ struct rdma_conn_param conn_param;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ if (!cmd.conn_param.valid)
+ return -EINVAL;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+ ret = rdma_connect(ctx->cm_id, &conn_param);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_listen cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
+ cmd.backlog : UCMA_MAX_BACKLOG;
+ ret = rdma_listen(ctx->cm_id, ctx->backlog);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_accept cmd;
+ struct rdma_conn_param conn_param;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ if (cmd.conn_param.valid) {
+ ctx->uid = cmd.uid;
+ ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+ ret = rdma_accept(ctx->cm_id, &conn_param);
+ } else
+ ret = rdma_accept(ctx->cm_id, NULL);
+
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_reject cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_disconnect cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_disconnect(ctx->cm_id);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_init_qp_attr cmd;
+ struct ib_uverbs_qp_attr resp;
+ struct ucma_context *ctx;
+ struct ib_qp_attr qp_attr;
+ int ret;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ resp.qp_attr_mask = 0;
+ memset(&qp_attr, 0, sizeof qp_attr);
+ qp_attr.qp_state = cmd.qp_state;
+ ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+ if (ret)
+ goto out;
+
+ ib_copy_qp_attr_to_user(&resp, &qp_attr);
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+
+out:
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_notify cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len) = {
+ [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
+ [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
+ [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr,
+ [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
+ [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
+ [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
+ [RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
+ [RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
+ [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
+ [RDMA_USER_CM_CMD_REJECT] = ucma_reject,
+ [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
+ [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
+ [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
+ [RDMA_USER_CM_CMD_GET_OPTION] = NULL,
+ [RDMA_USER_CM_CMD_SET_OPTION] = NULL,
+ [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
+};
+
+static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct ucma_file *file = filp->private_data;
+ struct rdma_ucm_cmd_hdr hdr;
+ ssize_t ret;
+
+ if (len < sizeof(hdr))
+ return -EINVAL;
+
+ if (copy_from_user(&hdr, buf, sizeof(hdr)))
+ return -EFAULT;
+
+ if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
+ return -EINVAL;
+
+ if (hdr.in + sizeof(hdr) > len)
+ return -EINVAL;
+
+ if (!ucma_cmd_table[hdr.cmd])
+ return -ENOSYS;
+
+ ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
+ if (!ret)
+ ret = len;
+
+ return ret;
+}
+
+static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct ucma_file *file = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &file->poll_wait, wait);
+
+ if (!list_empty(&file->event_list))
+ mask = POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static int ucma_open(struct inode *inode, struct file *filp)
+{
+ struct ucma_file *file;
+
+ file = kmalloc(sizeof *file, GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&file->event_list);
+ INIT_LIST_HEAD(&file->ctx_list);
+ init_waitqueue_head(&file->poll_wait);
+ mutex_init(&file->mut);
+
+ filp->private_data = file;
+ file->filp = filp;
+ return 0;
+}
+
+static int ucma_close(struct inode *inode, struct file *filp)
+{
+ struct ucma_file *file = filp->private_data;
+ struct ucma_context *ctx, *tmp;
+
+ mutex_lock(&file->mut);
+ list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
+ mutex_unlock(&file->mut);
+
+ mutex_lock(&mut);
+ idr_remove(&ctx_idr, ctx->id);
+ mutex_unlock(&mut);
+
+ ucma_free_ctx(ctx);
+ mutex_lock(&file->mut);
+ }
+ mutex_unlock(&file->mut);
+ kfree(file);
+ return 0;
+}
+
+static struct file_operations ucma_fops = {
+ .owner = THIS_MODULE,
+ .open = ucma_open,
+ .release = ucma_close,
+ .write = ucma_write,
+ .poll = ucma_poll,
+};
+
+static struct miscdevice ucma_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "rdma_cm",
+ .fops = &ucma_fops,
+};
+
+static ssize_t show_abi_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
+}
+static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+
+static int __init ucma_init(void)
+{
+ int ret;
+
+ ret = misc_register(&ucma_misc);
+ if (ret)
+ return ret;
+
+ ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
+ if (ret) {
+ printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+ goto err;
+ }
+ return 0;
+err:
+ misc_deregister(&ucma_misc);
+ return ret;
+}
+
+static void __exit ucma_cleanup(void)
+{
+ device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
+ misc_deregister(&ucma_misc);
+ idr_destroy(&ctx_idr);
+}
+
+module_init(ucma_init);
+module_exit(ucma_cleanup);
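
The new rdma_ucm module exposes the RDMA CM through an "rdma_cm" misc device: each write() carries a struct rdma_ucm_cmd_hdr (cmd, in, out) followed by the command payload, and the kernel copies any response to the user buffer whose address the command names. A hedged userspace sketch of creating an id, assuming the kernel's <rdma/rdma_user_cm.h> is visible to the build, that udev creates /dev/infiniband/rdma_cm, and that 0x0106 is the RDMA_PS_TCP value from this era's <rdma/rdma_cm.h>:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <rdma/rdma_user_cm.h>

	int main(void)
	{
		struct {
			struct rdma_ucm_cmd_hdr hdr;
			struct rdma_ucm_create_id cmd;
		} msg;
		struct rdma_ucm_create_id_resp resp;
		int fd;

		fd = open("/dev/infiniband/rdma_cm", O_RDWR);
		if (fd < 0)
			return 1;

		memset(&msg, 0, sizeof msg);
		msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
		msg.hdr.in = sizeof msg.cmd;
		msg.hdr.out = sizeof resp;
		msg.cmd.uid = 1;			/* opaque user cookie */
		msg.cmd.response = (unsigned long) &resp;
		msg.cmd.ps = 0x0106;			/* assumed RDMA_PS_TCP */

		if (write(fd, &msg, sizeof msg) != sizeof msg)
			perror("create_id");
		else
			printf("context id %d\n", resp.id);
		close(fd);
		return 0;
	}
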
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 4e16314e8e6d..a617ca7b6923 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -534,9 +534,9 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
* module reference.
*/
filp->f_op = fops_get(&uverbs_event_fops);
- filp->f_vfsmnt = mntget(uverbs_event_mnt);
- filp->f_dentry = dget(uverbs_event_mnt->mnt_root);
- filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
+ filp->f_path.mnt = mntget(uverbs_event_mnt);
+ filp->f_path.dentry = dget(uverbs_event_mnt->mnt_root);
+ filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
filp->f_flags = O_RDONLY;
filp->f_mode = FMODE_READ;
filp->private_data = ev_file;
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index ce46b13ae02b..5440da0e59b4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -32,8 +32,8 @@
#include <rdma/ib_marshall.h>
-static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
- struct ib_ah_attr *src)
+void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+ struct ib_ah_attr *src)
{
memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
dst->grh.flow_label = src->grh.flow_label;
@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
dst->port_num = src->port_num;
}
+EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src)
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index efe147dbeb42..c95fe952abd5 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
int i;
list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
- dma_unmap_sg(dev->dma_device, chunk->page_list,
- chunk->nents, DMA_BIDIRECTIONAL);
+ ib_dma_unmap_sg(dev, chunk->page_list,
+ chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) {
if (umem->writable && dirty)
set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
chunk->page_list[i].length = PAGE_SIZE;
}
- chunk->nmap = dma_map_sg(dev->dma_device,
- &chunk->page_list[0],
- chunk->nents,
- DMA_BIDIRECTIONAL);
+ chunk->nmap = ib_dma_map_sg(dev,
+ &chunk->page_list[0],
+ chunk->nents,
+ DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
put_page(chunk->page_list[i].page);
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
up_write(&current->mm->mmap_sem);
}
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
{
- struct ib_umem_account_work *work = work_ptr;
+ struct ib_umem_account_work *work =
+ container_of(_work, struct ib_umem_account_work, work);
down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
return;
}
- INIT_WORK(&work->work, ib_umem_account, work);
+ INIT_WORK(&work->work, ib_umem_account);
work->mm = mm;
work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
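
The scatter/gather path follows the same conversion: ib_dma_map_sg() is keyed on the struct ib_device and returns the number of mapped entries, with <= 0 meaning failure, which is why the error path above drops its page references. A sketch only (map_chunk is an illustrative name):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/scatterlist.h>
	#include <rdma/ib_verbs.h>

	static int map_chunk(struct ib_device *dev, struct scatterlist *sg,
			     int nents)
	{
		int nmap;

		nmap = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		if (nmap <= 0)
			return -ENOMEM;	/* caller unwinds its page refs */
		return nmap;		/* entries actually mapped */
	}
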
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 1b17dcdd0505..04a9db5de881 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -302,7 +302,7 @@ struct c2_dev {
unsigned long pa; /* PA device memory */
void **qptr_array;
- kmem_cache_t *host_msg_cache;
+ struct kmem_cache *host_msg_cache;
struct list_head cca_link; /* adapter list */
struct list_head eh_wakeup_list; /* event wakeup list */
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 5bcf697aa335..420c1380f5c3 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -161,8 +161,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
if (attr_mask & IB_QP_STATE) {
/* Ensure the state is valid */
- if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
- return -EINVAL;
+ if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
+ err = -EINVAL;
+ goto bail0;
+ }
wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
@@ -184,9 +186,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
if (attr->cur_qp_state != IB_QPS_RTR &&
attr->cur_qp_state != IB_QPS_RTS &&
attr->cur_qp_state != IB_QPS_SQD &&
- attr->cur_qp_state != IB_QPS_SQE)
- return -EINVAL;
- else
+ attr->cur_qp_state != IB_QPS_SQE) {
+ err = -EINVAL;
+ goto bail0;
+ } else
wr.next_qp_state =
cpu_to_be32(to_c2_state(attr->cur_qp_state));
@@ -564,6 +567,32 @@ int c2_alloc_qp(struct c2_dev *c2dev,
return err;
}
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+ if (send_cq == recv_cq)
+ spin_lock_irq(&send_cq->lock);
+ else if (send_cq > recv_cq) {
+ spin_lock_irq(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irq(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+ if (send_cq == recv_cq)
+ spin_unlock_irq(&send_cq->lock);
+ else if (send_cq > recv_cq) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock_irq(&recv_cq->lock);
+ }
+}
+
void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
struct c2_cq *send_cq;
@@ -576,15 +605,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
* Lock CQs here, so that CQ polling code can do QP lookup
* without taking a lock.
*/
- spin_lock_irq(&send_cq->lock);
- if (send_cq != recv_cq)
- spin_lock(&recv_cq->lock);
-
+ c2_lock_cqs(send_cq, recv_cq);
c2_free_qpn(c2dev, qp->qpn);
-
- if (send_cq != recv_cq)
- spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ c2_unlock_cqs(send_cq, recv_cq);
/*
* Destroy qp in the rnic...
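
c2_lock_cqs()/c2_unlock_cqs() close a latent AB-BA deadlock: two CPUs freeing QPs whose send and receive CQs are swapped could each hold one CQ lock while spinning on the other. Ordering the acquisitions by lock address makes the order global, and spin_lock_nested() tells lockdep that taking a second lock of the same class is intentional. The generic pattern, as a sketch (lock_pair is an illustrative name):

	#include <linux/spinlock.h>

	static void lock_pair(spinlock_t *a, spinlock_t *b)
	{
		if (a == b) {
			spin_lock_irq(a);
		} else if (a > b) {	/* higher address first, always */
			spin_lock_irq(a);
			spin_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock_irq(b);
			spin_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}
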
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 623dc95f91df..1687c511cb2f 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -441,7 +441,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
* involves initializing the various limits and resource pools that
* comprise the RNIC instance.
*/
-int c2_rnic_init(struct c2_dev *c2dev)
+int __devinit c2_rnic_init(struct c2_dev *c2dev)
{
int err;
u32 qsize, msgsize;
@@ -611,7 +611,7 @@ int c2_rnic_init(struct c2_dev *c2dev)
/*
* Called by c2_remove to cleanup the RNIC resources.
*/
-void c2_rnic_term(struct c2_dev *c2dev)
+void __devexit c2_rnic_term(struct c2_dev *c2dev)
{
/* Close the open adapter instance */
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 40caeb5f41b4..36620a22413c 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -164,7 +164,7 @@ void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
*/
void *vq_repbuf_alloc(struct c2_dev *c2dev)
{
- return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC);
+ return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
}
/*
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 214e2fdddeef..0d6e2c4bb245 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -57,7 +57,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
- av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+ av = kmem_cache_alloc(av_cache, GFP_KERNEL);
if (!av) {
ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
pd, ah_attr);
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 458fe19648a1..93995b658d94 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -134,7 +134,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
return ERR_PTR(-EINVAL);
- my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+ my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
if (!my_cq) {
ehca_err(device, "Out of memory for ehca_cq struct device=%p",
device);
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index e1b618c5f685..b7be950ab47c 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
ib_device);
struct hipz_query_hca *rblock;
- rblock = ehca_alloc_fw_ctrlblock();
+ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
ib_device);
struct hipz_query_port *rblock;
- rblock = ehca_alloc_fw_ctrlblock();
+ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return -EINVAL;
}
- rblock = ehca_alloc_fw_ctrlblock();
+ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
return -EINVAL;
}
- rblock = ehca_alloc_fw_ctrlblock();
+ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index c3ea746e9045..e7209afb4250 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
u64 *rblock;
unsigned long block_count;
- rblock = ehca_alloc_fw_ctrlblock();
+ rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
if (!rblock) {
ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 3720e3032cce..cd7789f0d08e 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped,
int ehca_munmap(unsigned long addr, size_t len);
#ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(void);
+void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
#else
-#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
#endif
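
Threading a gfp_t through ehca_alloc_fw_ctrlblock() lets ehca_error_data(), which can run where sleeping is forbidden, ask for GFP_ATOMIC while every other caller keeps GFP_KERNEL. A sketch of the calling convention (get_ctrlblock is an illustrative wrapper):

	#include <linux/gfp.h>
	#include "ehca_iverbs.h"	/* ehca_alloc_fw_ctrlblock() */

	static void *get_ctrlblock(int atomic_ctx)
	{
		return ehca_alloc_fw_ctrlblock(atomic_ctx ? GFP_ATOMIC
							  : GFP_KERNEL);
	}
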
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 01f5aa9cb56d..6574fbbaead5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0018");
+MODULE_VERSION("SVNEHCA_0019");
int ehca_open_aqp1 = 0;
int ehca_debug_level = 0;
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer;
#ifdef CONFIG_PPC_64K_PAGES
static struct kmem_cache *ctblk_cache = NULL;
-void *ehca_alloc_fw_ctrlblock(void)
+void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{
- void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
+ void *ret = kmem_cache_zalloc(ctblk_cache, flags);
if (!ret)
ehca_gen_err("Out of memory for ctblk");
return ret;
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
u64 h_ret;
struct hipz_query_hca *rblock;
- rblock = ehca_alloc_fw_ctrlblock();
+ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_gen_err("Cannot allocate rblock memory.");
return -ENOMEM;
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
int ret = 0;
struct hipz_query_hca *rblock;
- rblock = ehca_alloc_fw_ctrlblock();
+ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
@@ -469,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
\
shca = dev->driver_data; \
\
- rblock = ehca_alloc_fw_ctrlblock(); \
+ rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
if (!rblock) { \
dev_err(dev, "Can't allocate rblock memory."); \
return 0; \
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
int ret;
printk(KERN_INFO "eHCA Infiniband Device Driver "
- "(Rel.: SVNEHCA_0018)\n");
+ "(Rel.: SVNEHCA_0019)\n");
idr_init(&ehca_qp_idr);
idr_init(&ehca_cq_idr);
spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index abce676c0ae0..cfb362a1029c 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -53,7 +53,7 @@ static struct ehca_mr *ehca_mr_new(void)
{
struct ehca_mr *me;
- me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+ me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
if (me) {
memset(me, 0, sizeof(struct ehca_mr));
spin_lock_init(&me->mrlock);
@@ -72,7 +72,7 @@ static struct ehca_mw *ehca_mw_new(void)
{
struct ehca_mw *me;
- me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+ me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
if (me) {
memset(me, 0, sizeof(struct ehca_mw));
spin_lock_init(&me->mwlock);
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
u32 i;
u64 *kpage;
- kpage = ehca_alloc_fw_ctrlblock();
+ kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!kpage) {
ehca_err(&shca->ib_device, "kpage alloc failed");
ret = -ENOMEM;
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
ehca_mrmw_map_acl(acl, &hipz_acl);
ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
- kpage = ehca_alloc_fw_ctrlblock();
+ kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!kpage) {
ehca_err(&shca->ib_device, "kpage alloc failed");
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
index 2c3cdc6f7b39..d5345e5b3cd6 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -50,7 +50,7 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
{
struct ehca_pd *pd;
- pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
+ pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
if (!pd) {
ehca_err(device, "device=%p context=%p out of memory",
device, context);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index cf3e50ee2d06..34b85556d01e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -450,7 +450,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
if (pd->uobject && udata)
context = pd->uobject->context;
- my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
+ my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
if (!my_qp) {
ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
return ERR_PTR(-ENOMEM);
@@ -732,8 +732,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
u64 h_ret;
struct ipz_queue *squeue;
void *bad_send_wqe_p, *bad_send_wqe_v;
- void *squeue_start_p, *squeue_end_p;
- void *squeue_start_v, *squeue_end_v;
+ u64 q_ofs;
struct ehca_wqe *wqe;
int qp_num = my_qp->ib_qp.qp_num;
@@ -755,26 +754,23 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
if (ehca_debug_level)
ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
squeue = &my_qp->ipz_squeue;
- squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
- squeue_end_p = squeue_start_p+squeue->queue_length;
- squeue_start_v = abs_to_virt((u64)squeue_start_p);
- squeue_end_v = abs_to_virt((u64)squeue_end_p);
- ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
- qp_num, squeue_start_v, squeue_end_v);
+ if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
+ ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
+ " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
+ return -EFAULT;
+ }
/* loop sets wqe's purge bit */
- wqe = (struct ehca_wqe*)bad_send_wqe_v;
+ wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) {
if (ehca_debug_level)
ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */
- wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
+ q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
+ wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = (*bad_wqe_cnt)+1;
- if ((void*)wqe >= squeue_end_v) {
- wqe = squeue_start_v;
- }
}
/*
* bad wqe will be reprocessed and ignored when poll_cq() is called,
@@ -811,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
unsigned long spl_flags = 0;
/* do query_qp to obtain current attr values */
- mqpcb = ehca_alloc_fw_ctrlblock();
+ mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!mqpcb) {
ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
"ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1277,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
return -EINVAL;
}
- qpcb = ehca_alloc_fw_ctrlblock();
+ qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!qpcb) {
ehca_err(qp->device,"Out of memory for qpcb "
"ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index e028ff1588cc..bf7a40088f61 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -70,6 +70,19 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
return ret;
}
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
+{
+ int i;
+ for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
+ u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
+ if (addr >= page && addr < page + queue->pagesize) {
+ *q_offset = addr - page + i * queue->pagesize;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
int ipz_queue_ctor(struct ipz_queue *queue,
const u32 nr_of_pages,
const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
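
A worked example of the translation ipz_queue_abs_to_offset() performs, assuming a 4 KiB pagesize: if the bad WQE's absolute address is 0x10080 and the scan finds it inside queue page i = 2, whose absolute base is 0x10000, then q_offset = (0x10080 - 0x10000) + 2 * 4096 = 0x80 + 0x2000 = 0x2080, i.e. byte 0x80 of the third page of the logically contiguous queue. ipz_queue_advance_offset() (ipz_pt_fn.h, below) then steps such an offset by qe_size and wraps it at queue_length, which is what lets the purge loop in ehca_qp.c walk the send queue without the old start/end pointer bookkeeping.
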
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 2f13509d5257..dc3bda2634b7 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -150,6 +150,21 @@ static inline void *ipz_qeit_reset(struct ipz_queue *queue)
return ipz_qeit_get(queue);
}
+/*
+ * return the q_offset corresponding to an absolute address
+ */
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
+
+/*
+ * return the next queue offset. don't modify the queue.
+ */
+static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
+{
+ offset += queue->qe_size;
+ if (offset >= queue->queue_length) offset = 0;
+ return offset;
+}
+
/* struct generic page table */
struct ipz_pt {
u64 entries[EHCA_PT_ENTRIES];
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index 7dc10551cf18..ec2e603ea241 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
ib_ipath-y := \
ipath_cq.o \
ipath_diag.o \
+ ipath_dma.o \
ipath_driver.o \
ipath_eeprom.o \
ipath_file_ops.o \
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
new file mode 100644
index 000000000000..6e0f2b8918ce
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_verbs.h>
+
+#include "ipath_verbs.h"
+
+#define BAD_DMA_ADDRESS ((u64) 0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 ipath_dma_map_single(struct ib_device *dev,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return (u64) cpu_addr;
+}
+
+static void ipath_dma_unmap_single(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 ipath_dma_map_page(struct ib_device *dev,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ if (offset + size > PAGE_SIZE) {
+ addr = BAD_DMA_ADDRESS;
+ goto done;
+ }
+
+ addr = (u64) page_address(page);
+ if (addr)
+ addr += offset;
+ /* TODO: handle highmem pages */
+
+done:
+ return addr;
+}
+
+static void ipath_dma_unmap_page(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ for (i = 0; i < nents; i++) {
+ addr = (u64) page_address(sg[i].page);
+ /* TODO: handle highmem pages */
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void ipath_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+ u64 addr = (u64) page_address(sg->page);
+
+ if (addr)
+ addr += sg->offset;
+ return addr;
+}
+
+static unsigned int ipath_sg_dma_len(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return sg->length;
+}
+
+static void ipath_sync_single_for_cpu(struct ib_device *dev,
+ u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void ipath_sync_single_for_device(struct ib_device *dev,
+ u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64) addr;
+ return addr;
+}
+
+static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
+ ipath_mapping_error,
+ ipath_dma_map_single,
+ ipath_dma_unmap_single,
+ ipath_dma_map_page,
+ ipath_dma_unmap_page,
+ ipath_map_sg,
+ ipath_unmap_sg,
+ ipath_sg_dma_address,
+ ipath_sg_dma_len,
+ ipath_sync_single_for_cpu,
+ ipath_sync_single_for_device,
+ ipath_dma_alloc_coherent,
+ ipath_dma_free_coherent
+};
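
ipath can satisfy these operations with plain kernel virtual addresses because the core ib_dma_*() wrappers, added to include/rdma/ib_verbs.h in this same series, dispatch through dev->dma_ops whenever a driver installs a table such as ipath_dma_mapping_ops, and fall back to the real DMA API otherwise. A hedged reconstruction of the wrapper shape, with member names inferred from the ops table order above; see ib_verbs.h for the authoritative definition:

	static inline u64 ib_dma_map_single(struct ib_device *dev,
					    void *cpu_addr, size_t size,
					    enum dma_data_direction direction)
	{
		if (dev->dma_ops)
			return dev->dma_ops->map_single(dev, cpu_addr,
							size, direction);
		return dma_map_single(dev->dma_device, cpu_addr,
				      size, direction);
	}
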
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 1aeddb48e355..ae7f21a0cdc0 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1825,8 +1825,6 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
*/
void ipath_shutdown_device(struct ipath_devdata *dd)
{
- u64 val;
-
ipath_dbg("Shutting down the device\n");
dd->ipath_flags |= IPATH_LINKUNK;
@@ -1849,7 +1847,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
*/
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
/* flush it */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
/*
* enough for anything that's going to trickle out to have actually
* done so.
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index a9ddc6911f66..b932bcb67a5e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -699,7 +699,6 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
int start_stop)
{
struct ipath_devdata *dd = pd->port_dd;
- u64 tval;
ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
start_stop ? "en" : "dis", dd->ipath_unit,
@@ -729,7 +728,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
/* now be sure chip saw it before we return */
- tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
if (start_stop) {
/*
* And try to be sure that tail reg update has happened too.
@@ -738,7 +737,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
* in memory copy, since we could overwrite an update by the
* chip if we did.
*/
- tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+ ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
}
/* always; new head should be equal to new tail; see above */
bail:
@@ -1745,9 +1744,9 @@ static int ipath_assign_port(struct file *fp,
goto done;
}
- i_minor = iminor(fp->f_dentry->d_inode) - IPATH_USER_MINOR_BASE;
+ i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
- (long)fp->f_dentry->d_inode->i_rdev, i_minor);
+ (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);
if (i_minor)
ret = find_free_port(i_minor - 1, fp, uinfo);
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index d9ff283f725e..79a60f020a21 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -118,7 +118,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
u16 i;
struct ipath_devdata *dd;
- dd = file->f_dentry->d_inode->i_private;
+ dd = file->f_path.dentry->d_inode->i_private;
for (i = 0; i < NUM_COUNTERS; i++)
counters[i] = ipath_snap_cntr(dd, i);
@@ -138,7 +138,7 @@ static ssize_t atomic_node_info_read(struct file *file, char __user *buf,
struct ipath_devdata *dd;
u64 guid;
- dd = file->f_dentry->d_inode->i_private;
+ dd = file->f_path.dentry->d_inode->i_private;
guid = be64_to_cpu(dd->ipath_guid);
@@ -177,7 +177,7 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
u32 tmp, tmp2;
struct ipath_devdata *dd;
- dd = file->f_dentry->d_inode->i_private;
+ dd = file->f_path.dentry->d_inode->i_private;
/* so we only initialize non-zero fields. */
memset(portinfo, 0, sizeof portinfo);
@@ -324,7 +324,7 @@ static ssize_t flash_read(struct file *file, char __user *buf,
goto bail;
}
- dd = file->f_dentry->d_inode->i_private;
+ dd = file->f_path.dentry->d_inode->i_private;
if (ipath_eeprom_read(dd, pos, tmp, count)) {
ipath_dev_err(dd, "failed to read from flash\n");
ret = -ENXIO;
@@ -377,7 +377,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
goto bail_tmp;
}
- dd = file->f_dentry->d_inode->i_private;
+ dd = file->f_path.dentry->d_inode->i_private;
if (ipath_eeprom_write(dd, pos, tmp, count)) {
ret = -ENXIO;
ipath_dev_err(dd, "failed to write to flash\n");
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index e57c7a351cb5..7468477ba837 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1447,7 +1447,7 @@ static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
static int ipath_ht_early_init(struct ipath_devdata *dd)
{
u32 __iomem *piobuf;
- u32 pioincr, val32, egrsize;
+ u32 pioincr, val32;
int i;
/*
@@ -1467,7 +1467,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
* errors interrupts if we ever see one).
*/
dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
- egrsize = dd->ipath_rcvegrbufsize;
/*
* the min() check here is currently a nop, but it may not
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 6af89683f710..ae8bf9950c6d 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -602,7 +602,7 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
*/
static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
{
- u64 val, tmp, config1, prev_val;
+ u64 val, config1, prev_val;
int ret = 0;
ipath_dbg("Trying to bringup serdes\n");
@@ -633,7 +633,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
| INFINIPATH_SERDC0_L1PWR_DN;
ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
/* be sure chip saw it */
- tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
udelay(5); /* need pll reset set at least for a bit */
/*
* after PLL is reset, set the per-lane Resets and TxIdle and
@@ -647,7 +647,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
"and txidle (%llx)\n", (unsigned long long) val);
ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
/* be sure chip saw it */
- tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
/* need PLL reset clear for at least 11 usec before lane
* resets cleared; give it a few more to be sure */
udelay(15);
@@ -851,12 +851,12 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
int pos, ret;
dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
- dd->ipath_irq = pdev->irq;
ret = pci_enable_msi(dd->pcidev);
if (ret)
ipath_dev_err(dd, "pci_enable_msi failed: %d, "
"interrupts may not work\n", ret);
/* continue even if it fails, we may still be OK... */
+ dd->ipath_irq = pdev->irq;
if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
u16 control;
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index d819cca524cd..d4f6b5239ef8 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -347,10 +347,9 @@ done:
static int init_chip_reset(struct ipath_devdata *dd,
struct ipath_portdata **pdp)
{
- struct ipath_portdata *pd;
u32 rtmp;
- *pdp = pd = dd->ipath_pd[0];
+ *pdp = dd->ipath_pd[0];
/* ensure chip does no sends or receives while we re-initialize */
dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5652a550d442..72b9e279d19d 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -598,10 +598,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* on close
*/
if (errs & INFINIPATH_E_RRCVHDRFULL) {
- int any;
u32 hd, tl;
ipath_stats.sps_hdrqfull++;
- for (any = i = 0; i < dd->ipath_cfgports; i++) {
+ for (i = 0; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i];
if (i == 0) {
hd = dd->ipath_port0head;
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 9a6cbd05adcd..851763d7d2db 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
*/
if (sge->lkey == 0) {
isge->mr = NULL;
- isge->vaddr = bus_to_virt(sge->addr);
+ isge->vaddr = (void *) sge->addr;
isge->length = sge->length;
isge->sge_length = sge->length;
ret = 1;
@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
int ret;
/*
- * We use RKEY == zero for physical addresses
- * (see ipath_get_dma_mr).
+ * We use RKEY == zero for kernel virtual addresses
+ * (see ipath_get_dma_mr and ipath_dma.c).
*/
if (rkey == 0) {
sge->mr = NULL;
- sge->vaddr = phys_to_virt(vaddr);
+ sge->vaddr = (void *) vaddr;
sge->length = len;
sge->sge_length = len;
ss->sg_list = NULL;
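
[Editor's note] The bus_to_virt()/phys_to_virt() removals above rely on an invariant established by ipath_dma.c: the u64 "DMA address" handed back to the core is simply the kernel virtual address (ipath_dma_map_page above just takes page_address() plus offset), so the reserved-key fast paths can cast instead of translating. A hypothetical demo of that invariant, not from the patch, assuming <rdma/ib_verbs.h>:

static void ipath_dma_identity_demo(struct ib_device *ibdev,
				    void *buf, size_t len)
{
	u64 addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);

	/* With ipath_dma_mapping_ops installed, the mapping is the
	 * identity: the "DMA" address is the virtual address of buf. */
	if (!ib_dma_mapping_error(ibdev, addr))
		BUG_ON((void *)(unsigned long) addr != buf);
	ib_dma_unmap_single(ibdev, addr, len, DMA_TO_DEVICE);
}
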
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index a0673c1eef71..8cc8598d6c69 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
* @acc: access flags
*
* Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see ipath_dma.c).
*/
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{
@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
m = 0;
n = 0;
for (i = 0; i < num_phys_buf; i++) {
- mr->mr.map[m]->segs[n].vaddr =
- phys_to_virt(buffer_list[i].addr);
+ mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
mr->mr.map[m]->segs[n].length = buffer_list[i].size;
mr->mr.length += buffer_list[i].size;
n++;
@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
n = 0;
ps = 1 << fmr->page_shift;
for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]);
+ fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
fmr->mr.map[m]->segs[n].length = ps;
if (++n == IPATH_SEGSZ) {
m++;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 182de34f9f47..ffa6318ad0cc 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -215,7 +215,6 @@ static ssize_t store_mlid(struct device *dev,
size_t count)
{
struct ipath_devdata *dd = dev_get_drvdata(dev);
- int unit;
u16 mlid;
int ret;
@@ -223,8 +222,6 @@ static ssize_t store_mlid(struct device *dev,
if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
goto invalid;
- unit = dd->ipath_unit;
-
dd->ipath_mlid = mlid;
goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 413754b1d8a2..8536aeb96af8 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -214,9 +214,10 @@ struct ipath_user_pages_work {
unsigned long num_pages;
};
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
{
- struct ipath_user_pages_work *work = ptr;
+ struct ipath_user_pages_work *work =
+ container_of(_work, struct ipath_user_pages_work, work);
down_write(&work->mm->mmap_sem);
work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
goto bail;
- INIT_WORK(&work->work, user_pages_account, work);
+ INIT_WORK(&work->work, user_pages_account);
work->mm = mm;
work->num_pages = num_pages;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index a5456108dbad..2aaacdb7e52a 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1487,7 +1487,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- idev->pma_counter_select[5] = IB_PMA_PORT_XMIT_WAIT;
+ idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
idev->link_width_enabled = 3; /* 1x or 4x */
/* Snapshot current HW counters to "clear" them. */
@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
dev->detach_mcast = ipath_multicast_detach;
dev->process_mad = ipath_process_mad;
dev->mmap = ipath_mmap;
+ dev->dma_ops = &ipath_dma_mapping_ops;
snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 8039f6e5f0c8..c0c8d5b24a7d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs;
extern const u32 ib_ipath_rnr_table[];
+extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
+
#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 69599455aca2..27caf3b0648a 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -33,7 +33,6 @@
* $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -190,7 +189,7 @@ int mthca_create_ah(struct mthca_dev *dev,
on_hca_fail:
if (ah->type == MTHCA_AH_PCI_POOL) {
ah->av = pci_pool_alloc(dev->av_table.pool,
- SLAB_ATOMIC, &ah->avdma);
+ GFP_ATOMIC, &ah->avdma);
if (!ah->av)
return -ENOMEM;
@@ -323,7 +322,7 @@ int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
return 0;
}
-int __devinit mthca_init_av_table(struct mthca_dev *dev)
+int mthca_init_av_table(struct mthca_dev *dev)
{
int err;
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cd044ea2dfa4..e948158a28d9 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -57,7 +57,7 @@ static int catas_reset_disable;
module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
{
struct mthca_dev *dev, *tmpdev;
LIST_HEAD(tlist);
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
int __init mthca_catas_init(void)
{
- INIT_WORK(&catas_work, catas_reset, NULL);
+ INIT_WORK(&catas_work, catas_reset);
catas_wq = create_singlethread_workqueue("mthca_catas");
if (!catas_wq)
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 149b36901239..1159c8a0f2c5 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -36,7 +36,6 @@
* $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
*/
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <asm/io.h>
@@ -55,6 +54,10 @@ enum {
MTHCA_CQ_ENTRY_SIZE = 0x20
};
+enum {
+ MTHCA_ATOMIC_BYTE_LEN = 8
+};
+
/*
* Must be packed because start is 64 bits but only aligned to 32 bits.
*/
@@ -600,11 +603,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
break;
case MTHCA_OPCODE_ATOMIC_CS:
entry->opcode = IB_WC_COMP_SWAP;
- entry->byte_len = be32_to_cpu(cqe->byte_cnt);
+ entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
break;
case MTHCA_OPCODE_ATOMIC_FA:
entry->opcode = IB_WC_FETCH_ADD;
- entry->byte_len = be32_to_cpu(cqe->byte_cnt);
+ entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
break;
case MTHCA_OPCODE_BIND_MW:
entry->opcode = IB_WC_BIND_MW;
@@ -970,7 +973,7 @@ void mthca_free_cq(struct mthca_dev *dev,
mthca_free_mailbox(dev, mailbox);
}
-int __devinit mthca_init_cq_table(struct mthca_dev *dev)
+int mthca_init_cq_table(struct mthca_dev *dev)
{
int err;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e284e0613a94..8ec9fa1ff9ea 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -33,7 +33,6 @@
* $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -479,10 +478,10 @@ static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
-static int __devinit mthca_create_eq(struct mthca_dev *dev,
- int nent,
- u8 intr,
- struct mthca_eq *eq)
+static int mthca_create_eq(struct mthca_dev *dev,
+ int nent,
+ u8 intr,
+ struct mthca_eq *eq)
{
int npages;
u64 *dma_list = NULL;
@@ -664,9 +663,9 @@ static void mthca_free_irqs(struct mthca_dev *dev)
dev->eq_table.eq + i);
}
-static int __devinit mthca_map_reg(struct mthca_dev *dev,
- unsigned long offset, unsigned long size,
- void __iomem **map)
+static int mthca_map_reg(struct mthca_dev *dev,
+ unsigned long offset, unsigned long size,
+ void __iomem **map)
{
unsigned long base = pci_resource_start(dev->pdev, 0);
@@ -691,7 +690,7 @@ static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
iounmap(map);
}
-static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
+static int mthca_map_eq_regs(struct mthca_dev *dev)
{
if (mthca_is_memfree(dev)) {
/*
@@ -781,7 +780,7 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
}
}
-int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
+int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
int ret;
u8 status;
@@ -825,7 +824,7 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
__free_page(dev->eq_table.icm_page);
}
-int __devinit mthca_init_eq_table(struct mthca_dev *dev)
+int mthca_init_eq_table(struct mthca_dev *dev)
{
int err;
u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 45e106f14807..acfa41d968ee 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -317,7 +317,7 @@ err:
return ret;
}
-void __devexit mthca_free_agents(struct mthca_dev *dev)
+void mthca_free_agents(struct mthca_dev *dev)
{
struct ib_mad_agent *agent;
int p, q;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 47ea02148368..44bc6cc734ab 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -80,25 +80,62 @@ static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
-struct mutex mthca_device_mutex;
+DEFINE_MUTEX(mthca_device_mutex);
+
+#define MTHCA_DEFAULT_NUM_QP (1 << 16)
+#define MTHCA_DEFAULT_RDB_PER_QP (1 << 2)
+#define MTHCA_DEFAULT_NUM_CQ (1 << 16)
+#define MTHCA_DEFAULT_NUM_MCG (1 << 13)
+#define MTHCA_DEFAULT_NUM_MPT (1 << 17)
+#define MTHCA_DEFAULT_NUM_MTT (1 << 20)
+#define MTHCA_DEFAULT_NUM_UDAV (1 << 15)
+#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
+#define MTHCA_DEFAULT_NUM_UARC_SIZE (1 << 18)
+
+static struct mthca_profile hca_profile = {
+ .num_qp = MTHCA_DEFAULT_NUM_QP,
+ .rdb_per_qp = MTHCA_DEFAULT_RDB_PER_QP,
+ .num_cq = MTHCA_DEFAULT_NUM_CQ,
+ .num_mcg = MTHCA_DEFAULT_NUM_MCG,
+ .num_mpt = MTHCA_DEFAULT_NUM_MPT,
+ .num_mtt = MTHCA_DEFAULT_NUM_MTT,
+ .num_udav = MTHCA_DEFAULT_NUM_UDAV, /* Tavor only */
+ .fmr_reserved_mtts = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
+ .uarc_size = MTHCA_DEFAULT_NUM_UARC_SIZE, /* Arbel only */
+};
+
+module_param_named(num_qp, hca_profile.num_qp, int, 0444);
+MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");
+
+module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
+MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");
+
+module_param_named(num_cq, hca_profile.num_cq, int, 0444);
+MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");
+
+module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
+MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");
+
+module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
+MODULE_PARM_DESC(num_mpt,
+ "maximum number of memory protection table entries per HCA");
+
+module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
+MODULE_PARM_DESC(num_mtt,
+ "maximum number of memory translation table segments per HCA");
+
+module_param_named(num_udav, hca_profile.num_udav, int, 0444);
+MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");
+
+module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
+MODULE_PARM_DESC(fmr_reserved_mtts,
+ "number of memory translation table segments reserved for FMR");
static const char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
-static struct mthca_profile default_profile = {
- .num_qp = 1 << 16,
- .rdb_per_qp = 4,
- .num_cq = 1 << 16,
- .num_mcg = 1 << 13,
- .num_mpt = 1 << 17,
- .num_mtt = 1 << 20,
- .num_udav = 1 << 15, /* Tavor only */
- .fmr_reserved_mtts = 1 << 18, /* Tavor only */
- .uarc_size = 1 << 18, /* Arbel only */
-};
-
-static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
+static int mthca_tune_pci(struct mthca_dev *mdev)
{
int cap;
u16 val;
@@ -143,7 +180,7 @@ static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
return 0;
}
-static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
+static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
int err;
u8 status;
@@ -255,7 +292,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
return 0;
}
-static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
+static int mthca_init_tavor(struct mthca_dev *mdev)
{
u8 status;
int err;
@@ -303,7 +340,7 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
goto err_disable;
}
- profile = default_profile;
+ profile = hca_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.uarc_size = 0;
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
@@ -333,7 +370,7 @@ err_disable:
return err;
}
-static int __devinit mthca_load_fw(struct mthca_dev *mdev)
+static int mthca_load_fw(struct mthca_dev *mdev)
{
u8 status;
int err;
@@ -379,10 +416,10 @@ err_free:
return err;
}
-static int __devinit mthca_init_icm(struct mthca_dev *mdev,
- struct mthca_dev_lim *dev_lim,
- struct mthca_init_hca_param *init_hca,
- u64 icm_size)
+static int mthca_init_icm(struct mthca_dev *mdev,
+ struct mthca_dev_lim *dev_lim,
+ struct mthca_init_hca_param *init_hca,
+ u64 icm_size)
{
u64 aux_pages;
u8 status;
@@ -575,7 +612,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
}
-static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
+static int mthca_init_arbel(struct mthca_dev *mdev)
{
struct mthca_dev_lim dev_lim;
struct mthca_profile profile;
@@ -621,7 +658,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
goto err_stop_fw;
}
- profile = default_profile;
+ profile = hca_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.num_udav = 0;
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
@@ -683,7 +720,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
mthca_SYS_DIS(mdev, &status);
}
-static int __devinit mthca_init_hca(struct mthca_dev *mdev)
+static int mthca_init_hca(struct mthca_dev *mdev)
{
u8 status;
int err;
@@ -720,7 +757,7 @@ err_close:
return err;
}
-static int __devinit mthca_setup_hca(struct mthca_dev *dev)
+static int mthca_setup_hca(struct mthca_dev *dev)
{
int err;
u8 status;
@@ -875,8 +912,7 @@ err_uar_table_free:
return err;
}
-static int __devinit mthca_request_regions(struct pci_dev *pdev,
- int ddr_hidden)
+static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
{
int err;
@@ -928,7 +964,7 @@ static void mthca_release_regions(struct pci_dev *pdev,
MTHCA_HCR_SIZE);
}
-static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev)
+static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
struct msix_entry entries[3];
int err;
@@ -1213,7 +1249,7 @@ int __mthca_restart_one(struct pci_dev *pdev)
}
static int __devinit mthca_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
static int mthca_version_printed = 0;
int ret;
@@ -1279,11 +1315,55 @@ static struct pci_driver mthca_driver = {
.remove = __devexit_p(mthca_remove_one)
};
+static void __init __mthca_check_profile_val(const char *name, int *pval,
+ int pval_default)
+{
+ /* value must be positive and power of 2 */
+ int old_pval = *pval;
+
+ if (old_pval <= 0)
+ *pval = pval_default;
+ else
+ *pval = roundup_pow_of_two(old_pval);
+
+ if (old_pval != *pval) {
+ printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n",
+ old_pval, name);
+ printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, *pval);
+ }
+}
+
+#define mthca_check_profile_val(name, default) \
+ __mthca_check_profile_val(#name, &hca_profile.name, default)
+
+static void __init mthca_validate_profile(void)
+{
+ mthca_check_profile_val(num_qp, MTHCA_DEFAULT_NUM_QP);
+ mthca_check_profile_val(rdb_per_qp, MTHCA_DEFAULT_RDB_PER_QP);
+ mthca_check_profile_val(num_cq, MTHCA_DEFAULT_NUM_CQ);
+ mthca_check_profile_val(num_mcg, MTHCA_DEFAULT_NUM_MCG);
+ mthca_check_profile_val(num_mpt, MTHCA_DEFAULT_NUM_MPT);
+ mthca_check_profile_val(num_mtt, MTHCA_DEFAULT_NUM_MTT);
+ mthca_check_profile_val(num_udav, MTHCA_DEFAULT_NUM_UDAV);
+ mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS);
+
+ if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) {
+ printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n",
+ hca_profile.fmr_reserved_mtts);
+ printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n",
+ hca_profile.num_mtt);
+ hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2;
+ printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
+ hca_profile.fmr_reserved_mtts);
+ }
+}
+
static int __init mthca_init(void)
{
int ret;
- mutex_init(&mthca_device_mutex);
+ mthca_validate_profile();
+
ret = mthca_catas_init();
if (ret)
return ret;
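
[Editor's note] A worked example of the validation just added (illustrative): loading with num_qp=100000 is not a power of two, so roundup_pow_of_two(100000) corrects it to 131072 (1 << 17) with the two warnings printed by __mthca_check_profile_val(); num_qp=0 or a negative value falls back to MTHCA_DEFAULT_NUM_QP (1 << 16); and if fmr_reserved_mtts ends up >= num_mtt, it is clamped to num_mtt / 2 before the profile is used.
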
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 47ca8a9b7247..a8ad072be074 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -32,7 +32,6 @@
* $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -371,7 +370,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
return err;
}
-int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
+int mthca_init_mcg_table(struct mthca_dev *dev)
{
int err;
int table_size = dev->limits.num_mgms + dev->limits.num_amgms;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 15cc2f6eb475..6b19645d946c 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -232,7 +232,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
- if (chunk->mem[i].length >= offset) {
+ if (chunk->mem[i].length > offset) {
page = chunk->mem[i].page;
goto out;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index a486dec1707e..f71ffa88db3a 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -34,7 +34,6 @@
*/
#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include "mthca_dev.h"
@@ -135,7 +134,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
spin_unlock(&buddy->lock);
}
-static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
+static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
int i, s;
@@ -759,7 +758,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}
-int __devinit mthca_init_mr_table(struct mthca_dev *dev)
+int mthca_init_mr_table(struct mthca_dev *dev)
{
unsigned long addr;
int err, i;
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index 59df51614c85..c1e950764bd8 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -34,7 +34,6 @@
* $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include "mthca_dev.h"
@@ -69,7 +68,7 @@ void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
mthca_free(&dev->pd_table.alloc, pd->pd_num);
}
-int __devinit mthca_init_pd_table(struct mthca_dev *dev)
+int mthca_init_pd_table(struct mthca_dev *dev)
{
return mthca_alloc_init(&dev->pd_table.alloc,
dev->limits.num_pds,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index fc67f780581b..7b96751695ea 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -124,7 +124,7 @@ static int mthca_query_device(struct ib_device *ibdev,
props->max_map_per_fmr = 255;
else
props->max_map_per_fmr =
- (1 << (32 - long_log2(mdev->limits.num_mpts))) - 1;
+ (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
err = 0;
out:
@@ -816,7 +816,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
lkey = ucmd.lkey;
}
- ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, long_log2(entries), &status);
+ ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
if (status)
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 6a7822e0fc19..5f5214c0337d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -35,7 +35,6 @@
* $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
*/
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -430,13 +429,18 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
- int err;
- struct mthca_mailbox *mailbox;
+ int err = 0;
+ struct mthca_mailbox *mailbox = NULL;
struct mthca_qp_param *qp_param;
struct mthca_qp_context *context;
int mthca_state;
u8 status;
+ if (qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ goto done;
+ }
+
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
@@ -455,7 +459,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
mthca_state = be32_to_cpu(context->flags) >> 28;
qp_attr->qp_state = to_ib_qp_state(mthca_state);
- qp_attr->cur_qp_state = qp_attr->qp_state;
qp_attr->path_mtu = context->mtu_msgmax >> 5;
qp_attr->path_mig_state =
to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -465,11 +468,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
qp_attr->qp_access_flags =
to_ib_qp_access_flags(be32_to_cpu(context->params2));
- qp_attr->cap.max_send_wr = qp->sq.max;
- qp_attr->cap.max_recv_wr = qp->rq.max;
- qp_attr->cap.max_send_sge = qp->sq.max_gs;
- qp_attr->cap.max_recv_sge = qp->rq.max_gs;
- qp_attr->cap.max_inline_data = qp->max_inline_data;
if (qp->transport == RC || qp->transport == UC) {
to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
@@ -496,7 +494,16 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
qp_attr->alt_timeout = context->alt_path.ackto >> 3;
- qp_init_attr->cap = qp_attr->cap;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_send_wr = qp->sq.max;
+ qp_attr->cap.max_recv_wr = qp->rq.max;
+ qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ qp_init_attr->cap = qp_attr->cap;
out:
mthca_free_mailbox(dev, mailbox);
@@ -637,11 +644,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
if (mthca_is_memfree(dev)) {
if (qp->rq.max)
- qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
+ qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
if (qp->sq.max)
- qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
+ qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
}
@@ -2241,7 +2248,7 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
*new_wqe = 0;
}
-int __devinit mthca_init_qp_table(struct mthca_dev *dev)
+int mthca_init_qp_table(struct mthca_dev *dev)
{
int err;
u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index f5d7677d1079..10684da33d58 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -120,7 +120,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
memset(context, 0, sizeof *context);
- logsize = long_log2(srq->max) + srq->wqe_shift;
+ logsize = ilog2(srq->max);
context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
context->db_index = cpu_to_be32(srq->db_index);
@@ -213,7 +213,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
return -EINVAL;
- srq->wqe_shift = long_log2(ds);
+ srq->wqe_shift = ilog2(ds);
srq->srqn = mthca_alloc(&dev->srq_table.alloc);
if (srq->srqn == -1)
@@ -715,7 +715,7 @@ int mthca_max_srq_sge(struct mthca_dev *dev)
sizeof (struct mthca_data_seg));
}
-int __devinit mthca_init_srq_table(struct mthca_dev *dev)
+int mthca_init_srq_table(struct mthca_dev *dev)
{
int err;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0b8a79d53a00..07deee8f81ce 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -105,12 +105,12 @@ struct ipoib_mcast;
struct ipoib_rx_buf {
struct sk_buff *skb;
- dma_addr_t mapping;
+ u64 mapping;
};
struct ipoib_tx_buf {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ u64 mapping;
};
/*
@@ -136,11 +136,11 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
- struct work_struct pkey_task;
- struct work_struct mcast_task;
+ struct delayed_work pkey_task;
+ struct delayed_work mcast_task;
struct work_struct flush_task;
struct work_struct restart_task;
- struct work_struct ah_reap_task;
+ struct delayed_work ah_reap_task;
struct ib_device *ca;
u8 port;
@@ -233,7 +233,7 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
}
struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
-void ipoib_neigh_free(struct ipoib_neigh *neigh);
+void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
extern struct workqueue_struct *ipoib_workqueue;
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev);
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler,
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
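
[Editor's note] The signature churn above (and in the .c hunks that follow) tracks the 2.6.20 workqueue API change: work handlers now receive the struct work_struct itself and recover their context with container_of(), and items queued with a delay become struct delayed_work, whose embedded work_struct is reached as .work. A self-contained sketch of the pattern; all names here are illustrative, not from the driver:

#include <linux/workqueue.h>

struct demo_ctx {
	struct delayed_work task;	/* was: struct work_struct */
	int state;
};

static void demo_handler(struct work_struct *work)
{
	/* recover the enclosing context from the embedded work_struct */
	struct demo_ctx *ctx =
		container_of(work, struct demo_ctx, task.work);

	ctx->state++;
}

/* setup:  INIT_DELAYED_WORK(&ctx->task, demo_handler);
 * queue:  queue_delayed_work(wq, &ctx->task, 0);  (0 = run "now") */
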
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8bf5e9ec7c95..59d9594ed6d9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
ret = ib_post_recv(priv->qp, &param, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
- dma_unmap_single(priv->ca->dma_device,
- priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
- dma_addr_t addr;
+ u64 addr;
skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
if (!skb)
@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
*/
skb_reserve(skb, 4);
- addr = dma_map_single(priv->ca->dma_device,
- skb->data, IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(addr))) {
+ addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
dev_kfree_skb_any(skb);
return -EIO;
}
@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
- dma_addr_t addr;
+ u64 addr;
ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status);
@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &priv->tx_ring[wr_id];
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(tx_req, mapping),
- tx_req->skb->len,
- DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, tx_req->mapping,
+ tx_req->skb->len, DMA_TO_DEVICE);
++priv->stats.tx_packets;
priv->stats.tx_bytes += tx_req->skb->len;
@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ib_ah *address, u32 qpn,
- dma_addr_t addr, int len)
+ u64 addr, int len)
{
struct ib_send_wr *bad_wr;
@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req;
- dma_addr_t addr;
+ u64 addr;
if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
*/
tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
- addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(addr))) {
+ addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
++priv->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
- pci_unmap_addr_set(tx_req, mapping, addr);
+ tx_req->mapping = addr;
if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, addr, skb->len))) {
ipoib_warn(priv, "post_send failed\n");
++priv->stats.tx_errors;
- dma_unmap_single(priv->ca->dma_device, addr, skb->len,
- DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
} else {
dev->trans_start = jiffies;
@@ -400,10 +394,11 @@ static void __ipoib_reap_ah(struct net_device *dev)
spin_unlock_irq(&priv->tx_lock);
}
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+ struct net_device *dev = priv->dev;
__ipoib_reap_ah(dev);
@@ -537,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev)
while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)];
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(tx_req, mapping),
- tx_req->skb->len,
- DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca,
+ tx_req->mapping,
+ tx_req->skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
}
- for (i = 0; i < ipoib_recvq_size; ++i)
- if (priv->rx_ring[i].skb) {
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(&priv->rx_ring[i],
- mapping),
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_ring[i].skb);
- priv->rx_ring[i].skb = NULL;
- }
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ struct ipoib_rx_buf *rx_req;
+
+ rx_req = &priv->rx_ring[i];
+ if (!rx_req->skb)
+ continue;
+ ib_dma_unmap_single(priv->ca,
+ rx_req->mapping,
+ IPOIB_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_req->skb);
+ rx_req->skb = NULL;
+ }
goto timeout;
}
@@ -613,10 +611,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
return 0;
}
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)_dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+ struct ipoib_dev_priv *cpriv, *priv =
+ container_of(work, struct ipoib_dev_priv, flush_task);
+ struct net_device *dev = priv->dev;
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +637,14 @@ void ipoib_ib_dev_flush(void *_dev)
*/
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
ipoib_ib_dev_up(dev);
- ipoib_mcast_restart_task(dev);
+ ipoib_mcast_restart_task(&priv->restart_task);
}
mutex_lock(&priv->vlan_mutex);
/* Flush any child interfaces too */
list_for_each_entry(cpriv, &priv->child_intfs, list)
- ipoib_ib_dev_flush(cpriv->dev);
+ ipoib_ib_dev_flush(&cpriv->flush_task);
mutex_unlock(&priv->vlan_mutex);
}
@@ -672,10 +671,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
* change async notification is available.
*/
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, pkey_task.work);
+ struct net_device *dev = priv->dev;
ipoib_pkey_dev_check_presence(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 85522daeb946..705eb1d0e554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -264,7 +264,7 @@ static void path_free(struct net_device *dev, struct ipoib_path *path)
if (neigh->ah)
ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
return;
}
- skb_queue_head_init(&neigh->queue);
-
/*
* We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags.
@@ -525,10 +523,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
} else {
neigh->ah = NULL;
- __skb_queue_tail(&neigh->queue, skb);
if (!path->query && path_rec_start(dev, path))
goto err_list;
+
+ __skb_queue_tail(&neigh->queue, skb);
}
spin_unlock(&priv->lock);
@@ -538,7 +537,7 @@ err_list:
list_del(&neigh->list);
err_path:
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
++priv->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -655,7 +654,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
ipoib_put_ah(neigh->ah);
list_del(&neigh->list);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
spin_unlock(&priv->lock);
ipoib_path_lookup(skb, dev);
goto out;
@@ -786,7 +785,7 @@ static void ipoib_neigh_destructor(struct neighbour *n)
if (neigh->ah)
ah = neigh->ah;
list_del(&neigh->list);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(n->dev, neigh);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -805,13 +804,20 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
neigh->neighbour = neighbour;
*to_ipoib_neigh(neighbour) = neigh;
+ skb_queue_head_init(&neigh->queue);
return neigh;
}
-void ipoib_neigh_free(struct ipoib_neigh *neigh)
+void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
*to_ipoib_neigh(neigh->neighbour) = NULL;
+ while ((skb = __skb_dequeue(&neigh->queue))) {
+ ++priv->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
kfree(neigh);
}
@@ -933,11 +939,11 @@ static void ipoib_setup(struct net_device *dev)
INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list);
- INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev);
- INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev);
- INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev);
- INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
- INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev);
+ INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll);
+ INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+ INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
+ INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3faa1820f0e9..b04b72ca32ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -114,7 +114,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
*/
if (neigh->ah)
ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(neigh);
+ ipoib_neigh_free(dev, neigh);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status,
mcast->backoff = 1;
mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue,
+ &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
complete(&mcast->done);
return;
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status,
if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
if (status == -ETIMEDOUT)
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+ 0);
else
queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
mcast->backoff * HZ);
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
mcast->query_id = ret;
}
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, mcast_task.work);
+ struct net_device *dev = priv->dev;
if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_work(ipoib_workqueue, &priv->mcast_task);
+ queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, restart_task);
+ struct net_device *dev = priv->dev;
struct dev_mc_list *mclist;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9b2041e25d59..dd221eda3ea6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
* - if yes, the mtask is recycled at iscsi_complete_pdu
* - if no, the mtask is recycled at iser_snd_completion
*/
- if (error && error != -EAGAIN)
+ if (error && error != -ENOBUFS)
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return error;
@@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
iscsi_iser_ctask_xmit_exit:
- if (error && error != -EAGAIN)
+ if (error && error != -ENOBUFS)
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return error;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9c53916f28c2..cae8c96a55f8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -182,7 +182,7 @@ struct iser_regd_buf {
struct iser_mem_reg reg; /* memory registration info */
void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */
- dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */
+ u64 dma_addr; /* if non zero, addr for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size;
atomic_t ref_count; /* refcount, freed when dec to 0 */
@@ -283,7 +283,7 @@ struct iser_global {
struct mutex connlist_mutex;
struct list_head connlist; /* all iSER IB connections */
- kmem_cache_t *desc_cache;
+ struct kmem_cache *desc_cache;
};
extern struct iser_global ig;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9b3d79c796c8..0a7d1ab60e6d 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
static int
iser_check_xmit(struct iscsi_conn *conn, void *task)
{
- int rc = 0;
struct iscsi_iser_conn *iser_conn = conn->dd_data;
- write_lock_bh(conn->recv_lock);
if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
ISER_QP_MAX_REQ_DTOS) {
- iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- rc = -EAGAIN;
+ iser_dbg("%ld can't xmit task %p\n",jiffies,task);
+ return -ENOBUFS;
}
- write_unlock_bh(conn->recv_lock);
- return rc;
+ return 0;
}
@@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn *conn,
return -EPERM;
}
if (iser_check_xmit(conn, ctask))
- return -EAGAIN;
+ return -ENOBUFS;
edtl = ntohl(hdr->data_length);
@@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
}
if (iser_check_xmit(conn, ctask))
- return -EAGAIN;
+ return -ENOBUFS;
itt = ntohl(hdr->itt);
data_seg_len = ntoh24(hdr->dlength);
@@ -487,10 +483,8 @@ int iser_send_control(struct iscsi_conn *conn,
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iser_desc *mdesc = mtask->dd_data;
struct iser_dto *send_dto = NULL;
- unsigned int itt;
unsigned long data_seg_len;
int err = 0;
- unsigned char opcode;
struct iser_regd_buf *regd_buf;
struct iser_device *device;
@@ -500,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn,
}
if (iser_check_xmit(conn,mtask))
- return -EAGAIN;
+ return -ENOBUFS;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
@@ -512,8 +506,6 @@ int iser_send_control(struct iscsi_conn *conn,
iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
- itt = ntohl(mtask->hdr->itt);
- opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
data_seg_len = ntoh24(mtask->hdr->dlength);
if (data_seg_len > 0) {
@@ -609,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
struct iscsi_conn *conn = iser_conn->iscsi_conn;
struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
@@ -617,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc)
if (tx_desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, tx_desc);
+ if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
+ ISER_QP_MAX_REQ_DTOS)
+ resume_tx = 1;
+
atomic_dec(&ib_conn->post_send_buf_count);
- write_lock(conn->recv_lock);
- if (conn->suspend_tx) {
+ if (resume_tx) {
iser_dbg("%ld resuming tx\n",jiffies);
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
- write_unlock(conn->recv_lock);
if (tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
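
[Editor's note] The iser_initiator.c rework above drops the ISCSI_SUSPEND_BIT dance and the conn->recv_lock it required: iser_check_xmit() now simply refuses a send with -ENOBUFS while the QP's request window is full, and the send-completion path requeues xmitwork exactly when the counter drops back from ISER_QP_MAX_REQ_DTOS. Together with the iscsi_iser.c hunks earlier (only errors other than -ENOBUFS escalate to iscsi_conn_failure), a full window becomes plain back-pressure. An illustrative summary, with the caveat that the submit-side atomic_inc() lives in a hunk not shown here:

/*
 * submit side, iser_check_xmit():
 *	if (atomic_read(&post_send_buf_count) == ISER_QP_MAX_REQ_DTOS)
 *		return -ENOBUFS;
 *
 * completion side, iser_snd_completion():
 *	resume_tx = atomic_read(&post_send_buf_count) ==
 *		    ISER_QP_MAX_REQ_DTOS;
 *	atomic_dec(&post_send_buf_count);
 *	if (resume_tx)
 *		scsi_queue_work(conn->session->host, &conn->xmitwork);
 */
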
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 0606744c3f84..fc9f1fd0ae54 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>
@@ -51,7 +52,7 @@
*/
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
- struct device *dma_device;
+ struct ib_device *dev;
if ((atomic_read(&regd_buf->ref_count) == 0) ||
atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -60,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
iser_unreg_mem(&regd_buf->reg);
if (regd_buf->dma_addr) {
- dma_device = regd_buf->device->ib_device->dma_device;
- dma_unmap_single(dma_device,
+ dev = regd_buf->device->ib_device;
+ ib_dma_unmap_single(dev,
regd_buf->dma_addr,
regd_buf->data_size,
regd_buf->direction);
@@ -83,12 +84,12 @@ void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction)
{
- dma_addr_t dma_addr;
+ u64 dma_addr;
- dma_addr = dma_map_single(device->ib_device->dma_device,
- regd_buf->virt_addr,
- regd_buf->data_size, direction);
- BUG_ON(dma_mapping_error(dma_addr));
+ dma_addr = ib_dma_map_single(device->ib_device,
+ regd_buf->virt_addr,
+ regd_buf->data_size, direction);
+ BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
regd_buf->reg.lkey = device->mr->lkey;
regd_buf->reg.len = regd_buf->data_size;
@@ -106,14 +107,14 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir cmd_dir)
{
int dma_nents;
- struct device *dma_device;
+ struct ib_device *dev;
char *mem = NULL;
struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
unsigned long cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
mem = (void *)__get_free_pages(GFP_NOIO,
- long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
+ ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
mem = kmalloc(cmd_data_len, GFP_NOIO);
@@ -146,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
iser_ctask->data_copy[cmd_dir].copy_buf = mem;
- dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
- if (cmd_dir == ISER_DIR_OUT)
- dma_nents = dma_map_sg(dma_device,
- &iser_ctask->data_copy[cmd_dir].sg_single,
- 1, DMA_TO_DEVICE);
- else
- dma_nents = dma_map_sg(dma_device,
- &iser_ctask->data_copy[cmd_dir].sg_single,
- 1, DMA_FROM_DEVICE);
-
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
BUG_ON(dma_nents == 0);
iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -169,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir cmd_dir)
{
- struct device *dma_device;
+ struct ib_device *dev;
struct iser_data_buf *mem_copy;
unsigned long cmd_data_len;
- dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
- mem_copy = &iser_ctask->data_copy[cmd_dir];
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ mem_copy = &iser_ctask->data_copy[cmd_dir];
- if (cmd_dir == ISER_DIR_OUT)
- dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
- DMA_TO_DEVICE);
- else
- dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
- DMA_FROM_DEVICE);
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (cmd_dir == ISER_DIR_IN) {
char *mem;
@@ -210,7 +203,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
free_pages((unsigned long)mem_copy->copy_buf,
- long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
+ ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
kfree(mem_copy->copy_buf);
@@ -230,11 +223,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
* consecutive elements. It also handles a single-entry SG.
*/
static int iser_sg_to_page_vec(struct iser_data_buf *data,
- struct iser_page_vec *page_vec)
+ struct iser_page_vec *page_vec,
+ struct ib_device *ibdev)
{
struct scatterlist *sg = (struct scatterlist *)data->buf;
- dma_addr_t first_addr, last_addr, page;
- int start_aligned, end_aligned;
+ u64 first_addr, last_addr, page;
+ int end_aligned;
unsigned int cur_page = 0;
unsigned long total_sz = 0;
int i;
@@ -243,19 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
for (i = 0; i < data->dma_nents; i++) {
- total_sz += sg_dma_len(&sg[i]);
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+
+ total_sz += dma_len;
- first_addr = sg_dma_address(&sg[i]);
- last_addr = first_addr + sg_dma_len(&sg[i]);
+ first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+ last_addr = first_addr + dma_len;
- start_aligned = !(first_addr & ~MASK_4K);
end_aligned = !(last_addr & ~MASK_4K);
/* keep collecting page fragments until the end is aligned or the SG list ends */
while (!end_aligned && (i + 1 < data->dma_nents)) {
i++;
- total_sz += sg_dma_len(&sg[i]);
- last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
+ dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+ total_sz += dma_len;
+ last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
end_aligned = !(last_addr & ~MASK_4K);
}
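The rewritten loop reads each element's bus address and length through ib_sg_dma_address()/ib_sg_dma_len() and keeps merging elements until one ends on a 4K boundary, because the FMR page vector can only describe a run whose internal boundaries are 4K-aligned. The predicate with worked values (assuming MASK_4K is ~(SIZE_4K - 1), as in the iSER headers):

	/* end_aligned is true iff the low 12 bits are clear:
	 *   last_addr = 0x12346800: last_addr & ~MASK_4K = 0x800 -> not aligned
	 *   last_addr = 0x12347000: last_addr & ~MASK_4K = 0     -> aligned  */
	end_aligned = !(last_addr & ~MASK_4K);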
@@ -287,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
* the number of entries which are aligned correctly. Supports the case where
* consecutive SG elements are actually fragments of the same physical page.
*/
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+ struct ib_device *ibdev)
{
struct scatterlist *sg;
- dma_addr_t end_addr, next_addr;
+ u64 end_addr, next_addr;
int i, cnt;
unsigned int ret_len = 0;
@@ -302,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
(unsigned long)page_to_phys(sg[i].page),
(unsigned long)sg[i].offset,
(unsigned long)sg[i].length); */
- end_addr = sg_dma_address(&sg[i]) +
- sg_dma_len(&sg[i]);
+ end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
+ ib_sg_dma_len(ibdev, &sg[i]);
/* iser_dbg("Checking sg iobuf end address "
"0x%08lX\n", end_addr); */
if (i + 1 < data->dma_nents) {
- next_addr = sg_dma_address(&sg[i+1]);
+ next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
/* are i, i+1 fragments of the same page? */
if (end_addr == next_addr)
continue;
@@ -324,7 +321,8 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
return ret_len;
}
-static void iser_data_buf_dump(struct iser_data_buf *data)
+static void iser_data_buf_dump(struct iser_data_buf *data,
+ struct ib_device *ibdev)
{
struct scatterlist *sg = (struct scatterlist *)data->buf;
int i;
@@ -332,9 +330,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
for (i = 0; i < data->dma_nents; i++)
iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)sg_dma_address(&sg[i]),
+ i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
sg[i].page, sg[i].offset,
- sg[i].length,sg_dma_len(&sg[i]));
+ sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
}
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -348,7 +346,8 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
}
static void iser_page_vec_build(struct iser_data_buf *data,
- struct iser_page_vec *page_vec)
+ struct iser_page_vec *page_vec,
+ struct ib_device *ibdev)
{
int page_vec_len = 0;
@@ -356,14 +355,14 @@ static void iser_page_vec_build(struct iser_data_buf *data,
page_vec->offset = 0;
iser_dbg("Translating sg sz: %d\n", data->dma_nents);
- page_vec_len = iser_sg_to_page_vec(data,page_vec);
+ page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
page_vec->length = page_vec_len;
if (page_vec_len * SIZE_4K < page_vec->data_size) {
iser_err("page_vec too short to hold this SG\n");
- iser_data_buf_dump(data);
+ iser_data_buf_dump(data, ibdev);
iser_dump_page_vec(page_vec);
BUG();
}
@@ -374,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
- struct device *dma_device;
+ struct ib_device *dev;
iser_ctask->dir[iser_dir] = 1;
- dma_device =
- iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
- data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
@@ -390,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
- struct device *dma_device;
+ struct ib_device *dev;
struct iser_data_buf *data;
- dma_device =
- iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
if (iser_ctask->dir[ISER_DIR_IN]) {
data = &iser_ctask->data[ISER_DIR_IN];
- dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}
if (iser_ctask->dir[ISER_DIR_OUT]) {
data = &iser_ctask->data[ISER_DIR_OUT];
- dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
}
}
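iser_dma_map_task_data() records each mapped direction in iser_ctask->dir[], and the unmap path above consults those flags, so a bidirectional command releases both mappings while a one-way command touches only its own. Paired usage as a sketch (the data-buf argument is inferred from the call sites in this hunk):

	if (iser_dma_map_task_data(iser_ctask,
				   &iser_ctask->data[ISER_DIR_OUT],
				   ISER_DIR_OUT, DMA_TO_DEVICE))
		return -EINVAL;

	/* ... issue the RDMA ... */

	iser_dma_unmap_task_data(iser_ctask);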
@@ -418,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
{
struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
struct iser_regd_buf *regd_buf;
int aligned_len;
@@ -427,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
regd_buf = &iser_ctask->rdma_regd[cmd_dir];
- aligned_len = iser_data_buf_aligned_len(mem);
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
iser_err("rdma alignment violation %d/%d aligned\n",
aligned_len, mem->size);
- iser_data_buf_dump(mem);
+ iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_ctask);
@@ -449,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
regd_buf->reg.lkey = device->mr->lkey;
regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.len = sg_dma_len(&sg[0]);
- regd_buf->reg.va = sg_dma_address(&sg[0]);
+ regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
+ regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
regd_buf->reg.is_fmr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
@@ -460,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
(unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */
- iser_page_vec_build(mem, ib_conn->page_vec);
+ iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
if (err) {
- iser_data_buf_dump(mem);
+ iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
ntoh24(iser_ctask->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 18a000034996..693b77002897 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -48,7 +48,7 @@
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn)
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
- INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
- ib_conn);
+ INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc)
return ret_val;
}
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
{
- struct iser_conn *ib_conn = data;
+ struct iser_conn *ib_conn =
+ container_of(work, struct iser_conn, comperror_work);
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
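Both the iSER hunk above and the SRP hunks below are instances of the same 2.6.20 workqueue conversion: INIT_WORK() loses its data argument, the handler takes the struct work_struct itself, and the enclosing object is recovered with container_of(). The shape of the conversion as a freestanding sketch (struct and function names here are illustrative):

	struct my_conn {
		/* ... connection state ... */
		struct work_struct error_work;
	};

	static void my_error_worker(struct work_struct *work)
	{
		/* Recover the enclosing object from the embedded member. */
		struct my_conn *conn =
			container_of(work, struct my_conn, error_work);
		/* ... handle the error for conn ... */
	}

	/* Registration names only the handler; the data pointer is now
	 * implicit in which work_struct gets queued:
	 *	INIT_WORK(&conn->error_work, my_error_worker);
	 */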
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4b09147f438f..cdecbf5911c8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
if (!iu->buf)
goto out_free_iu;
- iu->dma = dma_map_single(host->dev->dev->dma_device,
- iu->buf, size, direction);
- if (dma_mapping_error(iu->dma))
+ iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
+ if (ib_dma_mapping_error(host->dev->dev, iu->dma))
goto out_free_buf;
iu->size = size;
@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
if (!iu)
return;
- dma_unmap_single(host->dev->dev->dma_device,
- iu->dma, iu->size, iu->direction);
+ ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
kfree(iu->buf);
kfree(iu);
}
@@ -390,9 +388,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
wait_for_completion(&target->done);
}
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
{
- struct srp_target_port *target = target_ptr;
+ struct srp_target_port *target =
+ container_of(work, struct srp_target_port, work);
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
@@ -481,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
scat = &req->fake_sg;
}
- dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
- scmnd->sc_data_direction);
+ ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
+ scmnd->sc_data_direction);
}
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -575,7 +574,7 @@ err:
spin_lock_irq(target->scsi_host->host_lock);
if (target->state == SRP_TARGET_CONNECTING) {
target->state = SRP_TARGET_DEAD;
- INIT_WORK(&target->work, srp_remove_work, target);
+ INIT_WORK(&target->work, srp_remove_work);
schedule_work(&target->work);
}
spin_unlock_irq(target->scsi_host->host_lock);
@@ -594,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
int i, j;
int ret;
struct srp_device *dev = target->srp_host->dev;
+ struct ib_device *ibdev = dev->dev;
if (!dev->fmr_pool)
return -ENODEV;
- if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
+ if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
return -EINVAL;
len = page_cnt = 0;
for (i = 0; i < sg_cnt; ++i) {
- if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+ if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
if (i > 0)
return -EINVAL;
else
++page_cnt;
}
- if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
+ if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
~dev->fmr_page_mask) {
if (i < sg_cnt - 1)
return -EINVAL;
@@ -618,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
++page_cnt;
}
- len += sg_dma_len(&scat[i]);
+ len += dma_len;
}
page_cnt += len >> dev->fmr_page_shift;
@@ -630,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
return -ENOMEM;
page_cnt = 0;
- for (i = 0; i < sg_cnt; ++i)
- for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
+ for (i = 0; i < sg_cnt; ++i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+ for (j = 0; j < dma_len; j += dev->fmr_page_size)
dma_pages[page_cnt++] =
- (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
+ (ib_sg_dma_address(ibdev, &scat[i]) &
+ dev->fmr_page_mask) + j;
+ }
req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
dma_pages, page_cnt, io_addr);
@@ -643,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
goto out;
}
- buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
+ buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
+ ~dev->fmr_page_mask);
buf->key = cpu_to_be32(req->fmr->fmr->rkey);
buf->len = cpu_to_be32(len);
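srp_map_fmr() above now pulls addresses and lengths through the ib_sg_dma_* accessors while keeping the FMR preconditions: only the first element may begin, and only the last may end, away from an fmr page boundary. The flattening of one SG element into page-aligned bus addresses, pulled out as a hypothetical helper for clarity (the patch does this inline):

	/* Hypothetical helper, not in the patch: emit one page-aligned
	 * bus address per fmr_page_size chunk of a mapped SG element.
	 * With 4K pages, addr 0x12345000 and dma_len 12288 emit
	 * 0x12345000, 0x12346000, 0x12347000. */
	static int srp_emit_pages(u64 *dma_pages, int page_cnt, u64 addr,
				  unsigned int dma_len,
				  const struct srp_device *dev)
	{
		unsigned int j;

		for (j = 0; j < dma_len; j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(addr & dev->fmr_page_mask) + j;
		return page_cnt;
	}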
@@ -662,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count;
u8 fmt = SRP_DATA_DESC_DIRECT;
+ struct srp_device *dev;
+ struct ib_device *ibdev;
if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
return sizeof (struct srp_cmd);
@@ -686,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
}
- count = dma_map_sg(target->srp_host->dev->dev->dma_device,
- scat, nents, scmnd->sc_data_direction);
+ dev = target->srp_host->dev;
+ ibdev = dev->dev;
+
+ count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -701,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
*/
struct srp_direct_buf *buf = (void *) cmd->add_data;
- buf->va = cpu_to_be64(sg_dma_address(scat));
- buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
- buf->len = cpu_to_be32(sg_dma_len(scat));
+ buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+ buf->key = cpu_to_be32(dev->mr->rkey);
+ buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
} else if (srp_map_fmr(target, scat, count, req,
(void *) cmd->add_data)) {
/*
@@ -721,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
count * sizeof (struct srp_direct_buf);
for (i = 0; i < count; ++i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
buf->desc_list[i].va =
- cpu_to_be64(sg_dma_address(&scat[i]));
+ cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
buf->desc_list[i].key =
- cpu_to_be32(target->srp_host->dev->mr->rkey);
- buf->desc_list[i].len =
- cpu_to_be32(sg_dma_len(&scat[i]));
- datalen += sg_dma_len(&scat[i]);
+ cpu_to_be32(dev->mr->rkey);
+ buf->desc_list[i].len = cpu_to_be32(dma_len);
+ datalen += dma_len;
}
if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -807,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
+ struct ib_device *dev;
struct srp_iu *iu;
u8 opcode;
iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
- dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
- target->max_ti_iu_len, DMA_FROM_DEVICE);
+ dev = target->srp_host->dev->dev;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
+ DMA_FROM_DEVICE);
opcode = *(u8 *) iu->buf;
@@ -849,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
break;
}
- dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
- target->max_ti_iu_len, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
+ DMA_FROM_DEVICE);
}
static void srp_completion(struct ib_cq *cq, void *target_ptr)
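Receive processing shows the ownership bracket that goes with mapped buffers: the mapping is synced for the CPU before the IU is read, and handed back to the device before the receive is reposted, both now through the ib_dma_sync_single_* wrappers. In isolation (names follow the patch):

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);
	opcode = *(u8 *) iu->buf;	/* CPU may read iu->buf in between */
	/* ... dispatch on opcode ... */
	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);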
@@ -968,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
+ struct ib_device *dev;
int len;
if (target->state == SRP_TARGET_CONNECTING)
@@ -984,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
if (!iu)
goto err;
- dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
- srp_max_iu_len, DMA_TO_DEVICE);
+ dev = target->srp_host->dev->dev;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
+ DMA_TO_DEVICE);
req = list_entry(target->free_reqs.next, struct srp_request, list);
@@ -1017,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
goto err_unmap;
}
- dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
- srp_max_iu_len, DMA_TO_DEVICE);
+ ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+ DMA_TO_DEVICE);
if (__srp_post_send(target, iu, len)) {
printk(KERN_ERR PFX "Send failed\n");
@@ -1176,9 +1192,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
break;
}
- target->status = srp_alloc_iu_bufs(target);
- if (target->status)
- break;
+ if (!target->rx_ring[0]) {
+ target->status = srp_alloc_iu_bufs(target);
+ if (target->status)
+ break;
+ }
qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
if (!qp_attr) {
@@ -1716,7 +1734,8 @@ static ssize_t srp_create_target(struct class_device *class_dev,
if (!target_host)
return -ENOMEM;
- target_host->max_lun = SRP_MAX_LUN;
+ target_host->max_lun = SRP_MAX_LUN;
+ target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
target = host_to_target(target_host);
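The new max_cmd_len assignment sizes the CDB from the SRP wire format itself via the sizeof-through-a-null-pointer idiom: sizeof is an unevaluated context, so the cast null pointer is never dereferenced and the member's size is computed at compile time. Equivalent spellings, for illustration:

	/* Both are compile-time constants; no struct srp_cmd object
	 * ever exists. */
	size_t cdb_len  = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
	size_t cdb_len2 = sizeof_field(struct srp_cmd, cdb); /* helper in later kernels */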
@@ -1879,7 +1898,7 @@ static void srp_add_one(struct ib_device *device)
*/
srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
srp_dev->fmr_page_size = 1 << srp_dev->fmr_page_shift;
- srp_dev->fmr_page_mask = ~((unsigned long) srp_dev->fmr_page_size - 1);
+ srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
INIT_LIST_HEAD(&srp_dev->dev_list);
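Widening fmr_page_mask to u64 matters on 32-bit hosts: ib_sg_dma_address() returns a 64-bit bus address, and masking it with a 32-bit-wide unsigned long would zero the upper half. The truncation the change prevents, with concrete values:

	u64 addr = 0x123456000ULL;
	u64 bad  = addr & ~((unsigned long) 4096 - 1); /* 0x23456000 on 32-bit: high bits lost */
	u64 good = addr & ~((u64) 4096 - 1);           /* 0x123456000: intact */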
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index d4e35ef51374..c21772317b86 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -87,7 +87,7 @@ struct srp_device {
struct ib_fmr_pool *fmr_pool;
int fmr_page_shift;
int fmr_page_size;
- unsigned long fmr_page_mask;
+ u64 fmr_page_mask;
};
struct srp_host {
@@ -161,7 +161,7 @@ struct srp_target_port {
};
struct srp_iu {
- dma_addr_t dma;
+ u64 dma;
void *buf;
size_t size;
enum dma_data_direction direction;
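The header changes close the loop: srp_iu.dma, like fmr_page_mask above, now stores the u64 tokens the wrappers traffic in rather than the platform's dma_addr_t. The wrappers return u64 because of the dispatch they perform; a sketch of how such a wrapper is defined, modeled on ib_verbs.h of this era and slightly condensed:

	static inline u64 ib_dma_map_single(struct ib_device *dev,
					    void *cpu_addr, size_t size,
					    enum dma_data_direction direction)
	{
		/* A device may supply software dma_ops; otherwise fall
		 * through to the ordinary DMA API on its dma_device. */
		if (dev->dma_ops)
			return dev->dma_ops->map_single(dev, cpu_addr, size,
							direction);
		return dma_map_single(dev->dma_device, cpu_addr, size,
				      direction);
	}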