Diffstat (limited to 'drivers/infiniband')
69 files changed, 2488 insertions, 5584 deletions
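The bulk of this changeset moves the InfiniBand headers under include/rdma/ and annotates on-the-wire fields with the sparse-checkable big-endian types (__be16/__be32/__be64) in place of plain u16/u32/u64. A minimal sketch of the convention, using a hypothetical struct and field (the conversion helpers are the standard kernel ones):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct wire_hdr {
		__be32 qpn;	/* stored big-endian on the wire */
	};

	/* Convert at the boundary; sparse warns if __be32 and u32 mix. */
	static u32 wire_get_qpn(const struct wire_hdr *hdr)
	{
		return be32_to_cpu(hdr->qpn);
	}

	static void wire_set_qpn(struct wire_hdr *hdr, u32 qpn)
	{
		hdr->qpn = cpu_to_be32(qpn);
	}

Where raw big-endian bits are deliberately reused without translation (for example as an idr key, as cm.c does below), a __force cast tells sparse that the reinterpretation is intentional.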
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 10be36731ed7..678a7e097f32 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -Idrivers/infiniband/include
-
 obj-$(CONFIG_INFINIBAND) +=	ib_core.o ib_mad.o ib_sa.o \
				ib_cm.o ib_umad.o ib_ucm.o
 obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 729f0b0d983a..5ac86f566dc0 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -1,9 +1,10 @@
 /*
- * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -40,7 +41,7 @@
 #include <asm/bug.h>
-#include <ib_smi.h>
+#include <rdma/ib_smi.h>
 #include "smi.h"
 #include "agent_priv.h"
diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h
index 17435af1e914..2ec6d7f1b7d0 100644
--- a/drivers/infiniband/core/agent_priv.h
+++ b/drivers/infiniband/core/agent_priv.h
@@ -1,9 +1,9 @@
 /*
- * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3042360c97e1..f014e639088c 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1,5 +1,8 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,12 +35,11 @@
  * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
  */
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
 #include "core_priv.h"
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 403ed125d8f4..4de93ba274a6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -43,8 +43,8 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
-#include <ib_cache.h>
-#include <ib_cm.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_cm.h>
 #include "cm_msgs.h"
 MODULE_AUTHOR("Sean Hefty");
@@ -83,7 +83,7 @@ struct cm_port {
 struct cm_device {
	struct list_head list;
	struct ib_device *device;
-	u64 ca_guid;
+	__be64 ca_guid;
	struct cm_port port[0];
 };
@@ -100,8 +100,8 @@ struct cm_work {
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
-	u32 local_id;				/* Established / timewait */
-	u32 remote_id;
+	__be32 local_id;			/* Established / timewait */
+	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
 };
@@ -110,8 +110,8 @@ struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
-	u64 remote_ca_guid;
-	u32 remote_qpn;
+	__be64 remote_ca_guid;
+	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
 };
@@ -132,11 +132,11 @@ struct cm_id_private {
	struct cm_av alt_av;
	void *private_data;
-	u64 tid;
-	u32 local_qpn;
-	u32 remote_qpn;
-	u32 sq_psn;
-	u32 rq_psn;
+	__be64 tid;
+	__be32 local_qpn;
+	__be32 remote_qpn;
+	__be32 sq_psn;
+	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
@@ -253,7 +253,7 @@ static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
 {
	memset(ah_attr, 0, sizeof ah_attr);
-	ah_attr->dlid = be16_to_cpu(dlid);
+	ah_attr->dlid = dlid;
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
@@ -264,7 +264,7 @@
 {
	av->port = port;
	av->pkey_index = wc->pkey_index;
-	cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid),
+	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
		       wc->sl, wc->dlid_path_bits);
 }
@@ -295,8 +295,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
		return ret;
	av->port = port;
-	cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
-		       path->sl, path->slid & 0x7F);
+	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
+		       be16_to_cpu(path->dlid), path->sl,
+		       be16_to_cpu(path->slid) & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
 }
@@ -309,26 +310,26 @@
	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
-					(int *) &cm_id_priv->id.local_id);
+					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
	return ret;
 }

-static void cm_free_id(u32 local_id)
+static void cm_free_id(__be32 local_id)
 {
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
-	idr_remove(&cm.local_id_table, (int) local_id);
+	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
 }

-static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
 {
	struct cm_id_private *cm_id_priv;

-	cm_id_priv = idr_find(&cm.local_id_table, (int) local_id);
+	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
@@ -339,7 +340,7 @@ static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
	return cm_id_priv;
 }

-static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
 {
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
@@ -356,8 +357,8 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
-	u64 service_id = cm_id_priv->id.service_id;
-	u64 service_mask = cm_id_priv->id.service_mask;
+	__be64 service_id = cm_id_priv->id.service_id;
+	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
@@ -376,7 +377,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
	return NULL;
 }

-static struct cm_id_private * cm_find_listen(u64 service_id)
+static struct cm_id_private * cm_find_listen(__be64 service_id)
 {
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
@@ -400,8 +401,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
-	u64 remote_ca_guid = timewait_info->remote_ca_guid;
-	u32 remote_id = timewait_info->work.remote_id;
+	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
+	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
@@ -424,8 +425,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
	return NULL;
 }

-static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid,
-						   u32 remote_id)
+static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
+						   __be32 remote_id)
 {
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
@@ -453,8 +454,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
-	u64 remote_ca_guid = timewait_info->remote_ca_guid;
-	u32 remote_qpn = timewait_info->remote_qpn;
+	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
+	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
@@ -484,7 +485,7 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
-	u32 remote_id = cm_id_priv->id.remote_id;
+	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
@@ -598,7 +599,7 @@ static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
	spin_unlock_irqrestore(&cm.lock, flags);
 }

-static struct cm_timewait_info * cm_create_timewait_info(u32 local_id)
+static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 {
	struct cm_timewait_info *timewait_info;
@@ -715,14 +716,15 @@ retest:
 EXPORT_SYMBOL(ib_destroy_cm_id);

 int ib_cm_listen(struct ib_cm_id *cm_id,
-		 u64 service_id,
-		 u64 service_mask)
+		 __be64 service_id,
+		 __be64 service_mask)
 {
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

-	service_mask = service_mask ? service_mask : ~0ULL;
+	service_mask = service_mask ? service_mask :
+		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -735,8 +737,8 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
-		cm_id->service_id = __cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = ~0ULL;
+		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
+		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
@@ -752,18 +754,19 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_listen);

-static u64 cm_form_tid(struct cm_id_private *cm_id_priv,
-		       enum cm_msg_sequence msg_seq)
+static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
+			  enum cm_msg_sequence msg_seq)
 {
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
-	low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
+	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
+			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
 }

 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
-			      enum cm_msg_attr_id attr_id, u64 tid)
+			      __be16 attr_id, __be64 tid)
 {
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
@@ -896,7 +899,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
		goto error1;
	}
	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~0ULL;
+	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
@@ -963,7 +966,7 @@ static int cm_issue_rej(struct cm_port *port,
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
-	rej_msg->reason = reason;
+	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
@@ -977,8 +980,8 @@ static int cm_issue_rej(struct cm_port *port,
	return ret;
 }

-static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid,
-				    u32 local_qpn, u32 remote_qpn)
+static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
+				    __be32 local_qpn, __be32 remote_qpn)
 {
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
@@ -1137,7 +1140,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
		break;
	}

-	rej_msg->reason = reason;
+	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
@@ -1276,7 +1279,7 @@ static int cm_req_handler(struct cm_work *work)
	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = ~0ULL;
+	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
@@ -1969,7 +1972,7 @@ static void cm_format_rej_event(struct cm_work *work)
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
-	param->reason = rej_msg->reason;
+	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
 }
@@ -1978,20 +1981,20 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
-	u32 remote_id;
+	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

-	if (rej_msg->reason == IB_CM_REJ_TIMEOUT) {
+	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
-		timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari),
+		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
-				      (int) timewait_info->work.local_id);
+				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
@@ -2032,7 +2035,7 @@ static int cm_rej_handler(struct cm_work *work)
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
-		if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
+		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
@@ -2553,7 +2556,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
-	sidr_req_msg->pkey = param->pkey;
+	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
@@ -2580,7 +2583,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
		goto out;

	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~0ULL;
+	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2621,7 +2624,7 @@ static void cm_format_sidr_req_event(struct cm_work *work,
	sidr_req_msg = (struct cm_sidr_req_msg *)
			work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
-	param->pkey = sidr_req_msg->pkey;
+	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->device = work->port->mad_agent->device;
	param->port = work->port->port_num;
@@ -2645,7 +2648,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
	sidr_req_msg = (struct cm_sidr_req_msg *)
			work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
-	cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
+	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
@@ -2673,7 +2676,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = ~0ULL;
+	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
@@ -3175,10 +3178,10 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);

-static u64 cm_get_ca_guid(struct ib_device *device)
+static __be64 cm_get_ca_guid(struct ib_device *device)
 {
	struct ib_device_attr *device_attr;
-	u64 guid;
+	__be64 guid;
	int ret;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 15a309a77b2b..813ab70bf6d5 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -34,7 +34,7 @@
 #if !defined(CM_MSGS_H)
 #define CM_MSGS_H

-#include <ib_mad.h>
+#include <rdma/ib_mad.h>

 /*
  * Parameters to routines below should be in network-byte order, and values
@@ -43,19 +43,17 @@
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

-enum cm_msg_attr_id {
-	CM_REQ_ATTR_ID	    = __constant_htons(0x0010),
-	CM_MRA_ATTR_ID	    = __constant_htons(0x0011),
-	CM_REJ_ATTR_ID	    = __constant_htons(0x0012),
-	CM_REP_ATTR_ID	    = __constant_htons(0x0013),
-	CM_RTU_ATTR_ID	    = __constant_htons(0x0014),
-	CM_DREQ_ATTR_ID	    = __constant_htons(0x0015),
-	CM_DREP_ATTR_ID	    = __constant_htons(0x0016),
-	CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
-	CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
-	CM_LAP_ATTR_ID	    = __constant_htons(0x0019),
-	CM_APR_ATTR_ID	    = __constant_htons(0x001A)
-};
+#define CM_REQ_ATTR_ID	    __constant_htons(0x0010)
+#define CM_MRA_ATTR_ID	    __constant_htons(0x0011)
+#define CM_REJ_ATTR_ID	    __constant_htons(0x0012)
+#define CM_REP_ATTR_ID	    __constant_htons(0x0013)
+#define CM_RTU_ATTR_ID	    __constant_htons(0x0014)
+#define CM_DREQ_ATTR_ID	    __constant_htons(0x0015)
+#define CM_DREP_ATTR_ID	    __constant_htons(0x0016)
+#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
+#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
+#define CM_LAP_ATTR_ID	    __constant_htons(0x0019)
+#define CM_APR_ATTR_ID	    __constant_htons(0x001A)

 enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
@@ -67,35 +65,35 @@ enum cm_msg_sequence {
 struct cm_req_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 rsvd4;
-	u64 service_id;
-	u64 local_ca_guid;
-	u32 rsvd24;
-	u32 local_qkey;
+	__be32 local_comm_id;
+	__be32 rsvd4;
+	__be64 service_id;
+	__be64 local_ca_guid;
+	__be32 rsvd24;
+	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
-	u32 offset32;
+	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
-	u32 offset36;
+	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
-	u32 offset40;
+	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
-	u32 offset44;
-	u16 pkey;
+	__be32 offset44;
+	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, rsvd:3 */
	u8 offset51;

-	u16 primary_local_lid;
-	u16 primary_remote_lid;
+	__be16 primary_local_lid;
+	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
-	u32 primary_offset88;
+	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
@@ -103,12 +101,12 @@ struct cm_req_msg {
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

-	u16 alt_local_lid;
-	u16 alt_remote_lid;
+	__be16 alt_local_lid;
+	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
-	u32 alt_offset132;
+	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
@@ -120,12 +118,12 @@ struct cm_req_msg {
 } __attribute__ ((packed));

-static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
 {
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
 }

-static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
+static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
 {
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					 (be32_to_cpu(req_msg->offset32) &
@@ -208,13 +206,13 @@ static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					  0xFFFFFFFE));
 }

-static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
 {
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
 }

 static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
-					   u32 starting_psn)
+					   __be32 starting_psn)
 {
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
@@ -288,13 +286,13 @@ static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
			       ((srq & 0x1) << 3));
 }

-static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
 {
-	return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
+	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
 }

 static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
-						 u32 flow_label)
+						 __be32 flow_label)
 {
	req_msg->primary_offset88 = cpu_to_be32(
		(be32_to_cpu(req_msg->primary_offset88) &
@@ -350,13 +348,13 @@ static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_m
					 (local_ack_timeout << 3));
 }

-static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
 {
-	return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
+	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
 }

 static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
-					     u32 flow_label)
+					     __be32 flow_label)
 {
	req_msg->alt_offset132 = cpu_to_be32(
		(be32_to_cpu(req_msg->alt_offset132) &
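cm_msgs.h packs several sub-byte-aligned protocol fields into single big-endian words, so each field gets a shift-and-mask accessor pair like the ones above. A sketch of the pattern on a hypothetical word that keeps a 24-bit QPN above an 8-bit field (not a function from the patch):

	/* word layout (big-endian): QPN:24, other:8 */
	static inline __be32 example_get_qpn(__be32 word)
	{
		return cpu_to_be32(be32_to_cpu(word) >> 8);
	}

	static inline __be32 example_set_qpn(__be32 word, __be32 qpn)
	{
		return cpu_to_be32((be32_to_cpu(qpn) << 8) |
				   (be32_to_cpu(word) & 0x000000FF));
	}

Both helpers convert to CPU order, do the arithmetic, and convert back, so they stay correct on little- and big-endian hosts alike, and sparse can check every crossing.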
@@ -422,8 +420,8 @@ enum cm_msg_response {
 struct cm_mra_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
@@ -458,13 +456,13 @@ static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
 struct cm_rej_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
-	u16 reason;
+	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
@@ -495,45 +493,45 @@ static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
 struct cm_rep_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
-	u32 local_qkey;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
+	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
-	u32 offset12;
+	__be32 offset12;
	/* local EECN:24, rsvd:8 */
-	u32 offset16;
+	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
-	u32 offset20;
+	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
-	u64 local_ca_guid;
+	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

 } __attribute__ ((packed));

-static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
 {
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
 }

-static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
+static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
 {
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
 }

-static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
 {
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
 }

 static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
-					   u32 starting_psn)
+					   __be32 starting_psn)
 {
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
@@ -600,8 +598,8 @@ static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
 struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
@@ -610,21 +608,21 @@ struct cm_rtu_msg {
 struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
-	u32 offset8;
+	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

 } __attribute__ ((packed));

-static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
+static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
 {
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
 }

-static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
+static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
 {
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
@@ -633,8 +631,8 @@ static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
 struct cm_drep_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
@@ -643,37 +641,37 @@ struct cm_drep_msg {
 struct cm_lap_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;

-	u32 rsvd8;
+	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
-	u32 offset12;
-	u32 rsvd16;
+	__be32 offset12;
+	__be32 rsvd16;

-	u16 alt_local_lid;
-	u16 alt_remote_lid;
+	__be16 alt_local_lid;
+	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
-	u32 offset56;
+	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
-	uint8_t offset61;
+	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
-	uint8_t offset62;
+	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
-	uint8_t offset63;
+	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));

-static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
 {
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
 }

-static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn)
+static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
 {
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(lap_msg->offset12) &
@@ -693,17 +691,17 @@ static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
					      0xFFFFFF07));
 }

-static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
 {
-	return be32_to_cpu(lap_msg->offset56) >> 12;
+	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
 }

 static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
-					 u32 flow_label)
+					 __be32 flow_label)
 {
-	lap_msg->offset56 = cpu_to_be32((flow_label << 12) |
-					(be32_to_cpu(lap_msg->offset56) &
-					 0x00000FFF));
+	lap_msg->offset56 = cpu_to_be32(
+				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
+				 (be32_to_cpu(flow_label) << 12));
 }

 static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
@@ -766,8 +764,8 @@ static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
 struct cm_apr_msg {
	struct ib_mad_hdr hdr;

-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
@@ -779,10 +777,10 @@ struct cm_apr_msg {
 struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

-	u32 request_id;
-	u16 pkey;
-	u16 rsvd;
-	u64 service_id;
+	__be32 request_id;
+	__be16 pkey;
+	__be16 rsvd;
+	__be64 service_id;

	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));
@@ -790,26 +788,26 @@ struct cm_sidr_req_msg {
 struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

-	u32 request_id;
+	__be32 request_id;
	u8 status;
	u8 info_length;
-	u16 rsvd;
+	__be16 rsvd;
	/* QPN:24, rsvd:8 */
-	u32 offset8;
-	u64 service_id;
-	u32 qkey;
+	__be32 offset8;
+	__be64 service_id;
+	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));

-static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
+static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
 {
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
 }

 static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
-				       u32 qpn)
+				       __be32 qpn)
 {
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(sidr_rep_msg->offset8) &
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 797049626ff6..7ad47a4b166b 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>

-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>

 int ib_device_register_sysfs(struct ib_device *device);
 void ib_device_unregister_sysfs(struct ib_device *device);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9197e92d708a..d3cf84e01587 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7763b31abba7..d34a6f1c4f4c 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -39,7 +39,7 @@
 #include <linux/jhash.h>
 #include <linux/kthread.h>

-#include <ib_fmr_pool.h>
+#include <rdma/ib_fmr_pool.h>

 #include "core_priv.h"
@@ -334,6 +334,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 {
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
+	LIST_HEAD(fmr_list);
	int i;

	kthread_stop(pool->thread);
@@ -341,6 +342,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+		if (fmr->remap_count) {
+			INIT_LIST_HEAD(&fmr_list);
+			list_add_tail(&fmr->fmr->list, &fmr_list);
+			ib_unmap_fmr(&fmr_list);
+		}
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b97e210ce9c8..a4a4d9c1eef3 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -693,7 +693,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
		goto out;
	}

-	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
+	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
@@ -1554,7 +1555,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 }

 struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
 {
	struct ib_mad_send_wr_private *mad_send_wr;
@@ -1597,7 +1598,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
-	u64 tid;
+	__be64 tid;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -2165,7 +2166,8 @@ static void local_completions(void *data)
			 * Defined behavior is to complete response
			 * before request
			 */
-			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
+			build_smp_wc(local->wr_id,
+				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0 /* pkey index */,
				     recv_mad_agent->agent.port_num, &wc);
@@ -2294,7 +2296,7 @@ static void timeout_sends(void *data)
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 }

-static void ib_mad_thread_completion_handler(struct ib_cq *cq)
+static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
 {
	struct ib_mad_port_private *port_priv = cq->cq_context;
@@ -2574,8 +2576,7 @@ static int ib_mad_port_open(struct ib_device *device,
	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
-				     (ib_comp_handler)
-					ib_mad_thread_completion_handler,
+				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
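The mad.c change above drops the (ib_comp_handler) cast at ib_create_cq() by giving the MAD completion handler the two-argument signature the ib_comp_handler typedef expects. A condensed sketch of a conforming handler and registration (the my_port name is hypothetical; the ib_create_cq() arguments mirror the call in the hunk above: device, completion handler, event handler, context pointer, CQE count):

	static void my_comp_handler(struct ib_cq *cq, void *arg)
	{
		struct my_port *port = cq->cq_context;	/* same pointer passed at create */
		/* runs in interrupt context; defer real work to a thread/workqueue */
	}

	cq = ib_create_cq(device, my_comp_handler, NULL, port, cq_size);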
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 568da10b05ab..f1ba794e0daa 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -40,8 +40,8 @@
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
-#include <ib_mad.h>
-#include <ib_smi.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>

 #define PFX "ib_mad: "
@@ -121,7 +121,7 @@ struct ib_mad_send_wr_private {
	struct ib_send_wr send_wr;
	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
	u64 wr_id;			/* client WR ID */
-	u64 tid;
+	__be64 tid;
	unsigned long timeout;
	int retries;
	int retry;
@@ -144,7 +144,7 @@ struct ib_mad_local_private {
	struct ib_send_wr send_wr;
	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
	u64 wr_id;			/* client WR ID */
-	u64 tid;
+	__be64 tid;
 };

 struct ib_mad_mgmt_method_table {
@@ -210,7 +210,7 @@ extern kmem_cache_t *ib_mad_cache;
 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);

 struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);

 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8f1eb80e421f..43fd805e0265 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -61,7 +61,7 @@ struct mad_rmpp_recv {
	int seg_num;
	int newwin;

-	u64 tid;
+	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
@@ -100,6 +100,121 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
	}
 }

+static int data_offset(u8 mgmt_class)
+{
+	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		return offsetof(struct ib_sa_mad, data);
+	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return offsetof(struct ib_vendor_mad, data);
+	else
+		return offsetof(struct ib_rmpp_mad, data);
+}
+
+static void format_ack(struct ib_rmpp_mad *ack,
+		       struct ib_rmpp_mad *data,
+		       struct mad_rmpp_recv *rmpp_recv)
+{
+	unsigned long flags;
+
+	memcpy(&ack->mad_hdr, &data->mad_hdr,
+	       data_offset(data->mad_hdr.mgmt_class));
+
+	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
+	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+
+	spin_lock_irqsave(&rmpp_recv->lock, flags);
+	rmpp_recv->last_ack = rmpp_recv->seg_num;
+	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
+	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
+	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+}
+
+static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
+		     struct ib_mad_recv_wc *recv_wc)
+{
+	struct ib_mad_send_buf *msg;
+	struct ib_send_wr *bad_send_wr;
+	int hdr_len, ret;
+
+	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
+				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
+				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
+				 GFP_KERNEL);
+	if (!msg)
+		return;
+
+	format_ack((struct ib_rmpp_mad *) msg->mad,
+		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
+	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
+			       &bad_send_wr);
+	if (ret)
+		ib_free_send_mad(msg);
+}
+
+static int alloc_response_msg(struct ib_mad_agent *agent,
+			      struct ib_mad_recv_wc *recv_wc,
+			      struct ib_mad_send_buf **msg)
+{
+	struct ib_mad_send_buf *m;
+	struct ib_ah *ah;
+	int hdr_len;
+
+	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
+				  recv_wc->recv_buf.grh, agent->port_num);
+	if (IS_ERR(ah))
+		return PTR_ERR(ah);
+
+	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
+			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
+			       sizeof(struct ib_rmpp_mad) - hdr_len,
+			       GFP_KERNEL);
+	if (IS_ERR(m)) {
+		ib_destroy_ah(ah);
+		return PTR_ERR(m);
+	}
+	*msg = m;
+	return 0;
+}
+
+static void free_msg(struct ib_mad_send_buf *msg)
+{
+	ib_destroy_ah(msg->send_wr.wr.ud.ah);
+	ib_free_send_mad(msg);
+}
+
+static void nack_recv(struct ib_mad_agent_private *agent,
+		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
+{
+	struct ib_mad_send_buf *msg;
+	struct ib_rmpp_mad *rmpp_mad;
+	struct ib_send_wr *bad_send_wr;
+	int ret;
+
+	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
+	if (ret)
+		return;
+
+	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
+	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
+
+	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
+	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
+	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
+	rmpp_mad->rmpp_hdr.seg_num = 0;
+	rmpp_mad->rmpp_hdr.paylen_newwin = 0;
+
+	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
+	if (ret)
+		free_msg(msg);
+}
+
 static void recv_timeout_handler(void *data)
 {
	struct mad_rmpp_recv *rmpp_recv = data;
@@ -115,8 +230,8 @@ static void recv_timeout_handler(void *data)
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

-	/* TODO: send abort. */
	rmpp_wc = rmpp_recv->rmpp_wc;
+	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
 }
@@ -230,60 +345,6 @@ insert_rmpp_recv(struct ib_mad_agent_private *agent,
	return cur_rmpp_recv;
 }

-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return offsetof(struct ib_sa_mad, data);
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return offsetof(struct ib_vendor_mad, data);
-	else
-		return offsetof(struct ib_rmpp_mad, data);
-}
-
-static void format_ack(struct ib_rmpp_mad *ack,
-		       struct ib_rmpp_mad *data,
-		       struct mad_rmpp_recv *rmpp_recv)
-{
-	unsigned long flags;
-
-	memcpy(&ack->mad_hdr, &data->mad_hdr,
-	       data_offset(data->mad_hdr.mgmt_class));
-
-	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
-	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
-	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
-
-	spin_lock_irqsave(&rmpp_recv->lock, flags);
-	rmpp_recv->last_ack = rmpp_recv->seg_num;
-	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
-	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
-	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
-}
-
-static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
-		     struct ib_mad_recv_wc *recv_wc)
-{
-	struct ib_mad_send_buf *msg;
-	struct ib_send_wr *bad_send_wr;
-	int hdr_len, ret;
-
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
-	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
-				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
-				 GFP_KERNEL);
-	if (!msg)
-		return;
-
-	format_ack((struct ib_rmpp_mad *) msg->mad,
-		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
-	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
-			       &bad_send_wr);
-	if (ret)
-		ib_free_send_mad(msg);
-}
-
 static inline int get_last_flag(struct ib_mad_recv_buf *seg)
 {
	struct ib_rmpp_mad *rmpp_mad;
@@ -559,6 +620,34 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
	return ib_send_mad(mad_send_wr);
 }

+static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
+		       u8 rmpp_status)
+{
+	struct ib_mad_send_wr_private *mad_send_wr;
+	struct ib_mad_send_wc wc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&agent->lock, flags);
+	mad_send_wr = ib_find_send_mad(agent, tid);
+	if (!mad_send_wr)
+		goto out;	/* Unmatched send */
+
+	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+		goto out;	/* Send is already done */
+
+	ib_mark_mad_done(mad_send_wr);
+	spin_unlock_irqrestore(&agent->lock, flags);
+
+	wc.status = IB_WC_REM_ABORT_ERR;
+	wc.vendor_err = rmpp_status;
+	wc.wr_id = mad_send_wr->wr_id;
+	ib_mad_complete_send_wr(mad_send_wr, &wc);
+	return;
+out:
+	spin_unlock_irqrestore(&agent->lock, flags);
+}
+
 static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
 {
@@ -568,11 +657,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
-	if (rmpp_mad->rmpp_hdr.rmpp_status)
+	if (rmpp_mad->rmpp_hdr.rmpp_status) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
+	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+	if (newwin < seg_num) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_W2S);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
+		return;
+	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
@@ -583,8 +682,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

-	if (seg_num > mad_send_wr->total_seg)
-		goto out;	/* Bad ACK */
+	if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
+		spin_unlock_irqrestore(&agent->lock, flags);
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_S2B);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
+		return;
+	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */
@@ -628,6 +732,72 @@ out:
	spin_unlock_irqrestore(&agent->lock, flags);
 }

+static struct ib_mad_recv_wc *
+process_rmpp_data(struct ib_mad_agent_private *agent,
+		  struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_rmpp_hdr *rmpp_hdr;
+	u8 rmpp_status;
+
+	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;
+
+	if (rmpp_hdr->rmpp_status) {
+		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
+		goto bad;
+	}
+
+	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
+			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+			goto bad;
+		}
+		return start_rmpp(agent, mad_recv_wc);
+	} else {
+		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
+			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+			goto bad;
+		}
+		return continue_rmpp(agent, mad_recv_wc);
+	}
+bad:
+	nack_recv(agent, mad_recv_wc, rmpp_status);
+	ib_free_recv_mad(mad_recv_wc);
+	return NULL;
+}
+
+static void process_rmpp_stop(struct ib_mad_agent_private *agent,
+			      struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+	} else
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
+static void process_rmpp_abort(struct ib_mad_agent_private *agent,
+			       struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
+	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+	} else
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
 struct ib_mad_recv_wc *
 ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
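With data_offset(), abort_send() and nack_recv() in place, each RMPP protocol violation now terminates the local send and reports a status code back to the peer instead of being silently dropped (the old TODOs). The recurring pattern, condensed from the hunks above (status value illustrative):

	if (rmpp_mad->rmpp_hdr.rmpp_status) {	/* protocol violation */
		/* complete the matching local send with IB_WC_REM_ABORT_ERR */
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
		/* send an RMPP ABORT carrying the status back to the sender */
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}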
@@ -638,23 +808,29 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

-	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
+	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_UNV);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
+	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
-		if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
-			return start_rmpp(agent, mad_recv_wc);
-		else
-			return continue_rmpp(agent, mad_recv_wc);
+		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
+		process_rmpp_stop(agent, mad_recv_wc);
+		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
-		/* TODO: process_rmpp_nack(agent, mad_recv_wc); */
+		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BADT);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
 out:
@@ -714,7 +890,10 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		msg = (struct ib_mad_send_buf *) (unsigned long)
		      mad_send_wc->wr_id;
-		ib_free_send_mad(msg);
+		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
+			ib_free_send_mad(msg);
+		else
+			free_msg(msg);
		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
	}
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index eb5ff54c10d7..35df5010e723 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,7 +33,7 @@
  * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
  */
-#include <ib_pack.h>
+#include <rdma/ib_pack.h>

 static u64 value_read(int offset, int size, void *structure)
 {
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 795184931c83..126ac80db7b8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -44,8 +44,8 @@
 #include <linux/kref.h>
 #include <linux/idr.h>

-#include <ib_pack.h>
-#include <ib_sa.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_sa.h>

 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand subnet administration query support");
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index b4b284324a33..35852e794e26 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -1,9 +1,10 @@
 /*
- * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,7 +37,7 @@
  * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
  */
-#include <ib_smi.h>
+#include <rdma/ib_smi.h>
 #include "smi.h"

 /*
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 90d51b179abe..fae1c2dcee51 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,7 @@
 #include "core_priv.h"

-#include <ib_mad.h>
+#include <rdma/ib_mad.h>

 struct ib_port {
	struct kobject kobj;
@@ -253,14 +255,14 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
		return ret;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((u16 *) gid.raw)[0]),
-		       be16_to_cpu(((u16 *) gid.raw)[1]),
-		       be16_to_cpu(((u16 *) gid.raw)[2]),
-		       be16_to_cpu(((u16 *) gid.raw)[3]),
-		       be16_to_cpu(((u16 *) gid.raw)[4]),
-		       be16_to_cpu(((u16 *) gid.raw)[5]),
-		       be16_to_cpu(((u16 *) gid.raw)[6]),
-		       be16_to_cpu(((u16 *) gid.raw)[7]));
+		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+		       be16_to_cpu(((__be16 *) gid.raw)[7]));
 }

 static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
@@ -332,11 +334,11 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
		break;
	case 16:
		ret = sprintf(buf, "%u\n",
-			      be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 8)));
+			      be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
		break;
	case 32:
		ret = sprintf(buf, "%u\n",
-			      be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8)));
+			      be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
		break;
	default:
		ret = 0;
@@ -598,10 +600,10 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
		return ret;

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]),
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]),
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]),
-		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[3]));
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
 }

 static ssize_t show_node_guid(struct class_device *cdev, char *buf)
@@ -615,10 +617,10 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf)
		return ret;

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-		       be16_to_cpu(((u16 *) &attr.node_guid)[0]),
-		       be16_to_cpu(((u16 *) &attr.node_guid)[1]),
-		       be16_to_cpu(((u16 *) &attr.node_guid)[2]),
-		       be16_to_cpu(((u16 *) &attr.node_guid)[3]));
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
+		       be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
 }

 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 61d07c732f49..79595826ccc7 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -73,14 +74,18 @@ static struct semaphore ctx_id_mutex;
 static struct idr       ctx_id_table;
 static int              ctx_id_rover = 0;

-static struct ib_ucm_context *ib_ucm_ctx_get(int id)
+static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 {
	struct ib_ucm_context *ctx;

	down(&ctx_id_mutex);
	ctx = idr_find(&ctx_id_table, id);
-	if (ctx)
-		ctx->ref++;
+	if (!ctx)
+		ctx = ERR_PTR(-ENOENT);
+	else if (ctx->file != file)
+		ctx = ERR_PTR(-EINVAL);
+	else
+		atomic_inc(&ctx->ref);
	up(&ctx_id_mutex);

	return ctx;
@@ -88,21 +93,37 @@ static struct ib_ucm_context *ib_ucm_ctx_get(int id)

 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
 {
+	if (atomic_dec_and_test(&ctx->ref))
+		wake_up(&ctx->wait);
+}
+
+static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
+{
+	struct ib_ucm_context *ctx;
	struct ib_ucm_event *uevent;

	down(&ctx_id_mutex);
-
-	ctx->ref--;
-	if (!ctx->ref)
+	ctx = idr_find(&ctx_id_table, id);
+	if (!ctx)
+		ctx = ERR_PTR(-ENOENT);
+	else if (ctx->file != file)
+		ctx = ERR_PTR(-EINVAL);
+	else
		idr_remove(&ctx_id_table, ctx->id);
-
	up(&ctx_id_mutex);

-	if (ctx->ref)
-		return;
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);

-	down(&ctx->file->mutex);
+	atomic_dec(&ctx->ref);
+	wait_event(ctx->wait, !atomic_read(&ctx->ref));
+
+	/* No new events will be generated after destroying the cm_id. */
+	if (!IS_ERR(ctx->cm_id))
+		ib_destroy_cm_id(ctx->cm_id);
+
+	/* Cleanup events not yet reported to the user. */
+	down(&file->mutex);
	list_del(&ctx->file_list);
	while (!list_empty(&ctx->events)) {
@@ -117,13 +138,10 @@ static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
		kfree(uevent);
	}
+	up(&file->mutex);

-	up(&ctx->file->mutex);
-
-	ucm_dbg("Destroyed CM ID <%d>\n", ctx->id);
-
-	ib_destroy_cm_id(ctx->cm_id);
	kfree(ctx);
+	return 0;
 }

 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
@@ -135,11 +153,11 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
	if (!ctx)
		return NULL;

-	ctx->ref = 1; /* user reference */
+	atomic_set(&ctx->ref, 1);
+	init_waitqueue_head(&ctx->wait);
	ctx->file = file;

	INIT_LIST_HEAD(&ctx->events);
-	init_MUTEX(&ctx->mutex);

	list_add_tail(&ctx->file_list, &file->ctxs);
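The ucm context now pairs an atomic reference count with a wait queue instead of a lock-protected integer: lookups take a reference, ib_ucm_ctx_put() wakes any waiter on the final put, and the destroy path removes the id from the table so no new lookups can succeed, drops its own reference, and sleeps until all in-flight references drain before freeing. The idiom in isolation, condensed from the functions above:

	atomic_inc(&ctx->ref);				/* lookup side: ib_ucm_ctx_get() */

	if (atomic_dec_and_test(&ctx->ref))		/* release side: ib_ucm_ctx_put() */
		wake_up(&ctx->wait);

	/* destroy side: id already removed from the idr at this point */
	atomic_dec(&ctx->ref);
	wait_event(ctx->wait, !atomic_read(&ctx->ref));
	kfree(ctx);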
IB_UCM_PRES_ALTERNATE : 0); break; @@ -299,57 +294,46 @@ static int ib_ucm_event_process(struct ib_cm_event *evt, ib_ucm_event_rep_get(&uvt->resp.u.rep_resp, &evt->param.rep_rcvd); uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE; - break; case IB_CM_RTU_RECEIVED: uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; - break; case IB_CM_DREQ_RECEIVED: uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; - break; case IB_CM_DREP_RECEIVED: uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE; uvt->resp.u.send_status = evt->param.send_status; - break; case IB_CM_MRA_RECEIVED: - ib_ucm_event_mra_get(&uvt->resp.u.mra_resp, - &evt->param.mra_rcvd); + uvt->resp.u.mra_resp.timeout = + evt->param.mra_rcvd.service_timeout; uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE; - break; case IB_CM_REJ_RECEIVED: - ib_ucm_event_rej_get(&uvt->resp.u.rej_resp, - &evt->param.rej_rcvd); + uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason; uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.rej_rcvd.ari_length; info = evt->param.rej_rcvd.ari; - break; case IB_CM_LAP_RECEIVED: - ib_ucm_event_lap_get(&uvt->resp.u.lap_resp, - &evt->param.lap_rcvd); + ib_ucm_event_path_get(&uvt->resp.u.lap_resp.path, + evt->param.lap_rcvd.alternate_path); uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE; - uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ? - IB_UCM_PRES_ALTERNATE : 0); + uvt->resp.present = IB_UCM_PRES_ALTERNATE; break; case IB_CM_APR_RECEIVED: - ib_ucm_event_apr_get(&uvt->resp.u.apr_resp, - &evt->param.apr_rcvd); + uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status; uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.apr_rcvd.info_len; info = evt->param.apr_rcvd.apr_info; - break; case IB_CM_SIDR_REQ_RECEIVED: - ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp, + ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp, &evt->param.sidr_req_rcvd); uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; - break; case IB_CM_SIDR_REP_RECEIVED: ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp, @@ -357,43 +341,35 @@ static int ib_ucm_event_process(struct ib_cm_event *evt, uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; uvt->info_len = evt->param.sidr_rep_rcvd.info_len; info = evt->param.sidr_rep_rcvd.info; - break; default: uvt->resp.u.send_status = evt->param.send_status; - break; } - if (uvt->data_len && evt->private_data) { - + if (uvt->data_len) { uvt->data = kmalloc(uvt->data_len, GFP_KERNEL); - if (!uvt->data) { - result = -ENOMEM; - goto error; - } + if (!uvt->data) + goto err1; memcpy(uvt->data, evt->private_data, uvt->data_len); uvt->resp.present |= IB_UCM_PRES_DATA; } - if (uvt->info_len && info) { - + if (uvt->info_len) { uvt->info = kmalloc(uvt->info_len, GFP_KERNEL); - if (!uvt->info) { - result = -ENOMEM; - goto error; - } + if (!uvt->info) + goto err2; memcpy(uvt->info, info, uvt->info_len); uvt->resp.present |= IB_UCM_PRES_INFO; } - return 0; -error: - kfree(uvt->info); + +err2: kfree(uvt->data); - return result; +err1: + return -ENOMEM; } static int ib_ucm_event_handler(struct ib_cm_id *cm_id, @@ -403,63 +379,42 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id, struct ib_ucm_context *ctx; int result = 0; int id; - /* - * lookup correct context based on event type. 
- */ - switch (event->event) { - case IB_CM_REQ_RECEIVED: - id = (long)event->param.req_rcvd.listen_id->context; - break; - case IB_CM_SIDR_REQ_RECEIVED: - id = (long)event->param.sidr_req_rcvd.listen_id->context; - break; - default: - id = (long)cm_id->context; - break; - } - ucm_dbg("Event. CM ID <%d> event <%d>\n", id, event->event); - - ctx = ib_ucm_ctx_get(id); - if (!ctx) - return -ENOENT; + ctx = cm_id->context; if (event->event == IB_CM_REQ_RECEIVED || event->event == IB_CM_SIDR_REQ_RECEIVED) id = IB_UCM_CM_ID_INVALID; + else + id = ctx->id; uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); - if (!uevent) { - result = -ENOMEM; - goto done; - } + if (!uevent) + goto err1; memset(uevent, 0, sizeof(*uevent)); - uevent->resp.id = id; uevent->resp.event = event->event; - result = ib_ucm_event_process(event, uevent); + result = ib_ucm_event_process(ctx, event, uevent); if (result) - goto done; + goto err2; uevent->ctx = ctx; - uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED || - event->event == IB_CM_SIDR_REQ_RECEIVED ) ? - cm_id : NULL); + uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL; down(&ctx->file->mutex); - list_add_tail(&uevent->file_list, &ctx->file->events); list_add_tail(&uevent->ctx_list, &ctx->events); - wake_up_interruptible(&ctx->file->poll_wait); - up(&ctx->file->mutex); -done: - ctx->error = result; - ib_ucm_ctx_put(ctx); /* func reference */ - return result; + return 0; + +err2: + kfree(uevent); +err1: + /* Destroy new cm_id's */ + return (id == IB_UCM_CM_ID_INVALID); } static ssize_t ib_ucm_event(struct ib_ucm_file *file, @@ -517,9 +472,8 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, goto done; } - ctx->cm_id = uevent->cm_id; - ctx->cm_id->cm_handler = ib_ucm_event_handler; - ctx->cm_id->context = (void *)(unsigned long)ctx->id; + ctx->cm_id = uevent->cm_id; + ctx->cm_id->context = ctx; uevent->resp.id = ctx->id; @@ -585,30 +539,29 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + down(&file->mutex); ctx = ib_ucm_ctx_alloc(file); + up(&file->mutex); if (!ctx) return -ENOMEM; - ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, - (void *)(unsigned long)ctx->id); - if (!ctx->cm_id) { - result = -ENOMEM; - goto err_cm; + ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); + if (IS_ERR(ctx->cm_id)) { + result = PTR_ERR(ctx->cm_id); + goto err; } resp.id = ctx->id; if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) { result = -EFAULT; - goto err_ret; + goto err; } return 0; -err_ret: - ib_destroy_cm_id(ctx->cm_id); -err_cm: - ib_ucm_ctx_put(ctx); /* user reference */ +err: + ib_ucm_destroy_ctx(file, ctx->id); return result; } @@ -617,19 +570,11 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file, int in_len, int out_len) { struct ib_ucm_destroy_id cmd; - struct ib_ucm_context *ctx; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; - - ib_ucm_ctx_put(ctx); /* user reference */ - ib_ucm_ctx_put(ctx); /* func reference */ - - return 0; + return ib_ucm_destroy_ctx(file, cmd.id); } static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, @@ -647,15 +592,9 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; - - down(&ctx->file->mutex); - if (ctx->file != file) { - result = -EINVAL; - goto done; - } + ctx = ib_ucm_ctx_get(file, cmd.id); 
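+ /* ib_ucm_ctx_get() returns ERR_PTR(-ENOENT) for an unknown id and ERR_PTR(-EINVAL) for an id owned by another file, so callers can propagate the exact error via PTR_ERR(). */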
+ if (IS_ERR(ctx)) + return PTR_ERR(ctx); resp.service_id = ctx->cm_id->service_id; resp.service_mask = ctx->cm_id->service_mask; @@ -666,9 +605,7 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, &resp, sizeof(resp))) result = -EFAULT; -done: - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ + ib_ucm_ctx_put(ctx); return result; } @@ -683,19 +620,12 @@ static ssize_t ib_ucm_listen(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; + ctx = ib_ucm_ctx_get(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = ib_cm_listen(ctx->cm_id, cmd.service_id, - cmd.service_mask); - - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ + result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask); + ib_ucm_ctx_put(ctx); return result; } @@ -710,18 +640,12 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) - return -ENOENT; - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = ib_cm_establish(ctx->cm_id); + ctx = ib_ucm_ctx_get(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ + result = ib_cm_establish(ctx->cm_id); + ib_ucm_ctx_put(ctx); return result; } @@ -768,8 +692,8 @@ static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src) return -EFAULT; } - memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid)); - memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid)); + memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof sa_path->dgid); + memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof sa_path->sgid); sa_path->dlid = ucm_path.dlid; sa_path->slid = ucm_path.slid; @@ -839,25 +763,17 @@ static ssize_t ib_ucm_send_req(struct ib_ucm_file *file, param.max_cm_retries = cmd.max_cm_retries; param.srq = cmd.srq; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_req(ctx->cm_id, &param); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(param.private_data); kfree(param.primary_path); kfree(param.alternate_path); - return result; } @@ -890,23 +806,14 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file, param.rnr_retry_count = cmd.rnr_retry_count; param.srq = cmd.srq; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_rep(ctx->cm_id, &param); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ -done: kfree(param.private_data); - return result; } @@ -928,23 +835,14 @@ static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file, if (result) return result; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result =
func(ctx->cm_id, private_data, cmd.len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ -done: kfree(private_data); - return result; } @@ -995,26 +893,17 @@ static ssize_t ib_ucm_send_info(struct ib_ucm_file *file, if (result) goto done; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = func(ctx->cm_id, cmd.status, - info, cmd.info_len, + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { + result = func(ctx->cm_id, cmd.status, info, cmd.info_len, data, cmd.data_len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(data); kfree(info); - return result; } @@ -1048,24 +937,14 @@ static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file, if (result) return result; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { + result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else - result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, - data, cmd.len); - - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ -done: kfree(data); - return result; } @@ -1090,24 +969,16 @@ static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file, if (result) goto done; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(data); kfree(path); - return result; } @@ -1140,24 +1011,16 @@ static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file, param.max_cm_retries = cmd.max_cm_retries; param.pkey = cmd.pkey; - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } - - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_sidr_req(ctx->cm_id, &param); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(param.private_data); kfree(param.path); - return result; } @@ -1184,30 +1047,22 @@ static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file, if (result) goto done; - param.qp_num = cmd.qpn; - param.qkey = cmd.qkey; - param.status = cmd.status; - param.info_length = cmd.info_len; - param.private_data_len = cmd.data_len; - - ctx = ib_ucm_ctx_get(cmd.id); - if (!ctx) { - result = -ENOENT; - goto done; - } + param.qp_num = cmd.qpn; + param.qkey = cmd.qkey; + param.status = cmd.status; + param.info_length = cmd.info_len; + param.private_data_len = cmd.data_len; - down(&ctx->file->mutex); - if (ctx->file != file) - result = -EINVAL; - else + ctx = ib_ucm_ctx_get(file, cmd.id); + if (!IS_ERR(ctx)) { result = ib_send_cm_sidr_rep(ctx->cm_id, &param); + ib_ucm_ctx_put(ctx); + } else + result = PTR_ERR(ctx); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* func reference */ done: kfree(param.private_data); kfree(param.info); - return
result; } @@ -1305,22 +1160,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp) struct ib_ucm_context *ctx; down(&file->mutex); - while (!list_empty(&file->ctxs)) { ctx = list_entry(file->ctxs.next, struct ib_ucm_context, file_list); - up(&ctx->file->mutex); - ib_ucm_ctx_put(ctx); /* user reference */ + up(&file->mutex); + ib_ucm_destroy_ctx(file, ctx->id); down(&file->mutex); } - up(&file->mutex); - kfree(file); - - ucm_dbg("Deleted struct\n"); return 0; } diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h index 6d36606151b2..c8819b928a1b 100644 --- a/drivers/infiniband/core/ucm.h +++ b/drivers/infiniband/core/ucm.h @@ -40,17 +40,15 @@ #include <linux/cdev.h> #include <linux/idr.h> -#include <ib_cm.h> -#include <ib_user_cm.h> +#include <rdma/ib_cm.h> +#include <rdma/ib_user_cm.h> #define IB_UCM_CM_ID_INVALID 0xffffffff struct ib_ucm_file { struct semaphore mutex; struct file *filp; - /* - * list of pending events - */ + struct list_head ctxs; /* list of active connections */ struct list_head events; /* list of pending events */ wait_queue_head_t poll_wait; @@ -58,12 +56,11 @@ struct ib_ucm_file { struct ib_ucm_context { int id; - int ref; - int error; + wait_queue_head_t wait; + atomic_t ref; struct ib_ucm_file *file; struct ib_cm_id *cm_id; - struct semaphore mutex; struct list_head events; /* list of pending events. */ struct list_head file_list; /* member in file ctx list */ diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index dc4eb1db5e96..527b23450ab3 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -34,7 +35,7 @@ #include <linux/errno.h> -#include <ib_pack.h> +#include <rdma/ib_pack.h> #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ @@ -194,6 +195,7 @@ void ib_ud_header_init(int payload_bytes, struct ib_ud_header *header) { int header_len; + u16 packet_length; memset(header, 0, sizeof *header); @@ -208,7 +210,7 @@ void ib_ud_header_init(int payload_bytes, header->lrh.link_version = 0; header->lrh.link_next_header = grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; - header->lrh.packet_length = (IB_LRH_BYTES + + packet_length = (IB_LRH_BYTES + IB_BTH_BYTES + IB_DETH_BYTES + payload_bytes + @@ -217,8 +219,7 @@ void ib_ud_header_init(int payload_bytes, header->grh_present = grh_present; if (grh_present) { - header->lrh.packet_length += IB_GRH_BYTES / 4; - + packet_length += IB_GRH_BYTES / 4; header->grh.ip_version = 6; header->grh.payload_length = cpu_to_be16((IB_BTH_BYTES + @@ -229,7 +230,7 @@ void ib_ud_header_init(int payload_bytes, header->grh.next_header = 0x1b; } - cpu_to_be16s(&header->lrh.packet_length); + header->lrh.packet_length = cpu_to_be16(packet_length); if (header->immediate_present) header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 2e38792df533..7c2f03057ddb 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -49,8 +49,8 @@ #include <asm/uaccess.h> #include <asm/semaphore.h> -#include <ib_mad.h> -#include <ib_user_mad.h> +#include <rdma/ib_mad.h> +#include <rdma/ib_user_mad.h> MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); @@ -271,7 +271,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, struct ib_send_wr *bad_wr; struct ib_rmpp_mad *rmpp_mad; u8 method; - u64 *tid; + __be64 *tid; int ret, length, hdr_len, data_len, rmpp_hdr_size; int rmpp_active = 0; @@ -316,7 +316,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, if (packet->mad.hdr.grh_present) { ah_attr.ah_flags = IB_AH_GRH; memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); - ah_attr.grh.flow_label = packet->mad.hdr.flow_label; + ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; } diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 7696022f9a4e..180b3d4765e4 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -1,6 +1,8 @@ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -43,8 +45,8 @@ #include <linux/kref.h> #include <linux/idr.h> -#include <ib_verbs.h> -#include <ib_user_verbs.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_user_verbs.h> struct ib_uverbs_device { int devnum; @@ -97,10 +99,12 @@ extern struct idr ib_uverbs_mw_idr; extern struct idr ib_uverbs_ah_idr; extern struct idr ib_uverbs_cq_idr; extern struct idr ib_uverbs_qp_idr; +extern struct idr ib_uverbs_srq_idr; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); +void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, void *addr, size_t size, int write); @@ -129,5 +133,8 @@ IB_UVERBS_DECLARE_CMD(modify_qp); IB_UVERBS_DECLARE_CMD(destroy_qp); IB_UVERBS_DECLARE_CMD(attach_mcast); IB_UVERBS_DECLARE_CMD(detach_mcast); +IB_UVERBS_DECLARE_CMD(create_srq); +IB_UVERBS_DECLARE_CMD(modify_srq); +IB_UVERBS_DECLARE_CMD(destroy_srq); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 5f2bbcda4c73..ebccf9f38af9 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -724,6 +724,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, struct ib_uobject *uobj; struct ib_pd *pd; struct ib_cq *scq, *rcq; + struct ib_srq *srq; struct ib_qp *qp; struct ib_qp_init_attr attr; int ret; @@ -747,10 +748,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle); + srq = 
cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL; if (!pd || pd->uobject->context != file->ucontext || !scq || scq->uobject->context != file->ucontext || - !rcq || rcq->uobject->context != file->ucontext) { + !rcq || rcq->uobject->context != file->ucontext || + (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) { ret = -EINVAL; goto err_up; } @@ -759,7 +762,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, attr.qp_context = file; attr.send_cq = scq; attr.recv_cq = rcq; - attr.srq = NULL; + attr.srq = srq; attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; attr.qp_type = cmd.qp_type; @@ -1004,3 +1007,178 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, return ret ? ret : in_len; } + +ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_create_srq cmd; + struct ib_uverbs_create_srq_resp resp; + struct ib_udata udata; + struct ib_uobject *uobj; + struct ib_pd *pd; + struct ib_srq *srq; + struct ib_srq_init_attr attr; + int ret; + + if (out_len < sizeof resp) + return -ENOSPC; + + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + + INIT_UDATA(&udata, buf + sizeof cmd, + (unsigned long) cmd.response + sizeof resp, + in_len - sizeof cmd, out_len - sizeof resp); + + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); + if (!uobj) + return -ENOMEM; + + down(&ib_uverbs_idr_mutex); + + pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); + + if (!pd || pd->uobject->context != file->ucontext) { + ret = -EINVAL; + goto err_up; + } + + attr.event_handler = ib_uverbs_srq_event_handler; + attr.srq_context = file; + attr.attr.max_wr = cmd.max_wr; + attr.attr.max_sge = cmd.max_sge; + attr.attr.srq_limit = cmd.srq_limit; + + uobj->user_handle = cmd.user_handle; + uobj->context = file->ucontext; + + srq = pd->device->create_srq(pd, &attr, &udata); + if (IS_ERR(srq)) { + ret = PTR_ERR(srq); + goto err_up; + } + + srq->device = pd->device; + srq->pd = pd; + srq->uobject = uobj; + srq->event_handler = attr.event_handler; + srq->srq_context = attr.srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&srq->usecnt, 0); + + memset(&resp, 0, sizeof resp); + +retry: + if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) { + ret = -ENOMEM; + goto err_destroy; + } + + ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id); + + if (ret == -EAGAIN) + goto retry; + if (ret) + goto err_destroy; + + resp.srq_handle = uobj->id; + + spin_lock_irq(&file->ucontext->lock); + list_add_tail(&uobj->list, &file->ucontext->srq_list); + spin_unlock_irq(&file->ucontext->lock); + + if (copy_to_user((void __user *) (unsigned long) cmd.response, + &resp, sizeof resp)) { + ret = -EFAULT; + goto err_list; + } + + up(&ib_uverbs_idr_mutex); + + return in_len; + +err_list: + spin_lock_irq(&file->ucontext->lock); + list_del(&uobj->list); + spin_unlock_irq(&file->ucontext->lock); + +err_destroy: + ib_destroy_srq(srq); + +err_up: + up(&ib_uverbs_idr_mutex); + + kfree(uobj); + return ret; +} + +ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_modify_srq cmd; + struct ib_srq *srq; + struct ib_srq_attr attr; + int ret; + + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + + down(&ib_uverbs_idr_mutex); + + srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); + if (!srq || srq->uobject->context != file->ucontext) { + ret = -EINVAL; + goto out; + } + + attr.max_wr = cmd.max_wr; + attr.max_sge = 
cmd.max_sge; + attr.srq_limit = cmd.srq_limit; + + ret = ib_modify_srq(srq, &attr, cmd.attr_mask); + +out: + up(&ib_uverbs_idr_mutex); + + return ret ? ret : in_len; +} + +ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_destroy_srq cmd; + struct ib_srq *srq; + struct ib_uobject *uobj; + int ret = -EINVAL; + + if (copy_from_user(&cmd, buf, sizeof cmd)) + return -EFAULT; + + down(&ib_uverbs_idr_mutex); + + srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); + if (!srq || srq->uobject->context != file->ucontext) + goto out; + + uobj = srq->uobject; + + ret = ib_destroy_srq(srq); + if (ret) + goto out; + + idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); + + spin_lock_irq(&file->ucontext->lock); + list_del(&uobj->list); + spin_unlock_irq(&file->ucontext->lock); + + kfree(uobj); + +out: + up(&ib_uverbs_idr_mutex); + + return ret ? ret : in_len; +} diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index eb99e693dec2..09caf5b1ef36 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -1,6 +1,8 @@ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -67,6 +69,7 @@ DEFINE_IDR(ib_uverbs_mw_idr); DEFINE_IDR(ib_uverbs_ah_idr); DEFINE_IDR(ib_uverbs_cq_idr); DEFINE_IDR(ib_uverbs_qp_idr); +DEFINE_IDR(ib_uverbs_srq_idr); static spinlock_t map_lock; static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); @@ -91,6 +94,9 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, + [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, + [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, + [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, }; static struct vfsmount *uverbs_event_mnt; @@ -125,18 +131,26 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context) kfree(uobj); } - /* XXX Free SRQs */ + list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { + struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, uobj->id); + idr_remove(&ib_uverbs_srq_idr, uobj->id); + ib_destroy_srq(srq); + list_del(&uobj->list); + kfree(uobj); + } + /* XXX Free MWs */ list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { struct ib_mr *mr = idr_find(&ib_uverbs_mr_idr, uobj->id); + struct ib_device *mrdev = mr->device; struct ib_umem_object *memobj; idr_remove(&ib_uverbs_mr_idr, uobj->id); ib_dereg_mr(mr); memobj = container_of(uobj, struct ib_umem_object, uobject); - ib_umem_release_on_close(mr->device, &memobj->umem); + ib_umem_release_on_close(mrdev, &memobj->umem); list_del(&uobj->list); kfree(memobj); @@ -343,6 +357,13 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) event->event); } +void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) +{ + ib_uverbs_async_handler(context_ptr, + event->element.srq->uobject->user_handle, + event->event); +} + static void ib_uverbs_event_handler(struct ib_event_handler *handler, struct ib_event *event) { diff --git 
a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c index ed550f6595bd..36a32c315668 100644 --- a/drivers/infiniband/core/uverbs_mem.c +++ b/drivers/infiniband/core/uverbs_mem.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 506fdf1f2a26..5081d903e561 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -4,6 +4,7 @@ * Copyright (c) 2004 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two @@ -40,8 +41,8 @@ #include <linux/errno.h> #include <linux/err.h> -#include <ib_verbs.h> -#include <ib_cache.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_cache.h> /* Protection domains */ @@ -153,6 +154,66 @@ int ib_destroy_ah(struct ib_ah *ah) } EXPORT_SYMBOL(ib_destroy_ah); +/* Shared receive queues */ + +struct ib_srq *ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr) +{ + struct ib_srq *srq; + + if (!pd->device->create_srq) + return ERR_PTR(-ENOSYS); + + srq = pd->device->create_srq(pd, srq_init_attr, NULL); + + if (!IS_ERR(srq)) { + srq->device = pd->device; + srq->pd = pd; + srq->uobject = NULL; + srq->event_handler = srq_init_attr->event_handler; + srq->srq_context = srq_init_attr->srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&srq->usecnt, 0); + } + + return srq; +} +EXPORT_SYMBOL(ib_create_srq); + +int ib_modify_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask) +{ + return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); +} +EXPORT_SYMBOL(ib_modify_srq); + +int ib_query_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr) +{ + return srq->device->query_srq ? 
+ srq->device->query_srq(srq, srq_attr) : -ENOSYS; +} +EXPORT_SYMBOL(ib_query_srq); + +int ib_destroy_srq(struct ib_srq *srq) +{ + struct ib_pd *pd; + int ret; + + if (atomic_read(&srq->usecnt)) + return -EBUSY; + + pd = srq->pd; + + ret = srq->device->destroy_srq(srq); + if (!ret) + atomic_dec(&pd->usecnt); + + return ret; +} +EXPORT_SYMBOL(ib_destroy_srq); + /* Queue pairs */ struct ib_qp *ib_create_qp(struct ib_pd *pd, diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile index 5dcbd43073e2..c44f7bae5424 100644 --- a/drivers/infiniband/hw/mthca/Makefile +++ b/drivers/infiniband/hw/mthca/Makefile @@ -1,5 +1,3 @@ -EXTRA_CFLAGS += -Idrivers/infiniband/include - ifdef CONFIG_INFINIBAND_MTHCA_DEBUG EXTRA_CFLAGS += -DDEBUG endif @@ -9,4 +7,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \ mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \ mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \ - mthca_provider.o mthca_memfree.o mthca_uar.o + mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index b1db48dd91d6..9ba3211cef7c 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent) kfree(array->page_list); } + +/* + * Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr) +{ + int err = -ENOMEM; + int npages, shift; + u64 *dma_list = NULL; + dma_addr_t t; + int i; + + if (size <= max_direct) { + *is_direct = 1; + npages = 1; + shift = get_order(size) + PAGE_SHIFT; + + buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; + + pci_unmap_addr_set(&buf->direct, mapping, t); + + memset(buf->direct.buf, 0, size); + + while (t & ((1 << shift) - 1)) { + --shift; + npages *= 2; + } + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_free; + + for (i = 0; i < npages; ++i) + dma_list[i] = t + i * (1 << shift); + } else { + *is_direct = 0; + npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; + shift = PAGE_SHIFT; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + return -ENOMEM; + + buf->page_list = kmalloc(npages * sizeof *buf->page_list, + GFP_KERNEL); + if (!buf->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + buf->page_list[i].buf = NULL; + + for (i = 0; i < npages; ++i) { + buf->page_list[i].buf = + dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->page_list[i].buf) + goto err_free; + + dma_list[i] = t; + pci_unmap_addr_set(&buf->page_list[i], mapping, t); + + memset(buf->page_list[i].buf, 0, PAGE_SIZE); + } + } + + err = mthca_mr_alloc_phys(dev, pd->pd_num, + dma_list, shift, npages, + 0, size, + MTHCA_MPT_FLAG_LOCAL_READ | + (hca_write ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0), + mr); + if (err) + goto err_free; + + kfree(dma_list); + + return 0; + +err_free: + mthca_buf_free(dev, size, buf, *is_direct, NULL); + +err_out: + kfree(dma_list); + + return err; +} + +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr) +{ + int i; + + if (mr) + mthca_free_mr(dev, mr); + + if (is_direct) + dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, + pci_unmap_addr(&buf->direct, mapping)); + else { + for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + pci_unmap_addr(&buf->page_list[i], + mapping)); + kfree(buf->page_list); + } +} diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index d58dcbe66488..889e85096736 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -35,22 +35,22 @@ #include <linux/init.h> -#include <ib_verbs.h> -#include <ib_cache.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_cache.h> #include "mthca_dev.h" struct mthca_av { - u32 port_pd; - u8 reserved1; - u8 g_slid; - u16 dlid; - u8 reserved2; - u8 gid_index; - u8 msg_sr; - u8 hop_limit; - u32 sl_tclass_flowlabel; - u32 dgid[4]; + __be32 port_pd; + u8 reserved1; + u8 g_slid; + __be16 dlid; + u8 reserved2; + u8 gid_index; + u8 msg_sr; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + __be32 dgid[4]; }; int mthca_create_ah(struct mthca_dev *dev, @@ -128,7 +128,7 @@ on_hca_fail: av, (unsigned long) ah->avdma); for (j = 0; j < 8; ++j) printk(KERN_DEBUG " [%2x] %08x\n", - j * 4, be32_to_cpu(((u32 *) av)[j])); + j * 4, be32_to_cpu(((__be32 *) av)[j])); } if (ah->type == MTHCA_AH_ON_HCA) { @@ -169,7 +169,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; header->lrh.destination_lid = ah->av->dlid; - header->lrh.source_lid = ah->av->g_slid & 0x7f; + header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f); if (ah->av->g_slid & 0x80) { header->grh_present = 1; header->grh.traffic_class = diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 1557a522d831..cc758a2d2bc6 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -36,7 +37,7 @@ #include <linux/pci.h> #include <linux/errno.h> #include <asm/io.h> -#include <ib_mad.h> +#include <rdma/ib_mad.h> #include "mthca_dev.h" #include "mthca_config_reg.h" @@ -108,6 +109,7 @@ enum { CMD_SW2HW_SRQ = 0x35, CMD_HW2SW_SRQ = 0x36, CMD_QUERY_SRQ = 0x37, + CMD_ARM_SRQ = 0x40, /* QP/EE commands */ CMD_RST2INIT_QPEE = 0x19, @@ -219,20 +221,20 @@ static int mthca_cmd_post(struct mthca_dev *dev, * (and some architectures such as ia64 implement memcpy_toio * in terms of writeb). 
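* The (__force u32) casts below exist only to quiet sparse: cpu_to_be32() has already produced the on-the-wire byte order, and __raw_writel() stores the value without any further swapping.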
*/ - __raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); - __raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); - __raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4); - __raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); - __raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); - __raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4); + __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); + __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); + __raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4); + __raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); + __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); + __raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4); /* __raw_writel may not order writes. */ wmb(); - __raw_writel(cpu_to_be32((1 << HCR_GO_BIT) | - (event ? (1 << HCA_E_BIT) : 0) | - (op_modifier << HCR_OPMOD_SHIFT) | - op), dev->hcr + 6 * 4); + __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | + (event ? (1 << HCA_E_BIT) : 0) | + (op_modifier << HCR_OPMOD_SHIFT) | + op), dev->hcr + 6 * 4); out: up(&dev->cmd.hcr_sem); @@ -273,12 +275,14 @@ static int mthca_cmd_poll(struct mthca_dev *dev, goto out; } - if (out_is_imm) { - memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64)); - be64_to_cpus(out_param); - } + if (out_is_imm) + *out_param = + (u64) be32_to_cpu((__force __be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 | + (u64) be32_to_cpu((__force __be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4)); - *status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; + *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; out: up(&dev->cmd.poll_sem); @@ -1029,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); + mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", @@ -1082,6 +1088,34 @@ out: return err; } +static void get_board_id(void *vsd, char *board_id) +{ + int i; + +#define VSD_OFFSET_SIG1 0x00 +#define VSD_OFFSET_SIG2 0xde +#define VSD_OFFSET_MLX_BOARD_ID 0xd0 +#define VSD_OFFSET_TS_BOARD_ID 0x20 + +#define VSD_SIGNATURE_TOPSPIN 0x5ad + + memset(board_id, 0, MTHCA_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. 
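* swab32() reverses the byte order of each 32-bit word, undoing the firmware's per-word swap so the four words read back as a normal string.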
+ */ + for (i = 0; i < 4; ++i) + ((u32 *) board_id)[i] = + swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + } +} + int mthca_QUERY_ADAPTER(struct mthca_dev *dev, struct mthca_adapter *adapter, u8 *status) { @@ -1094,6 +1128,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev, #define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 +#define QUERY_ADAPTER_VSD_OFFSET 0x20 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) @@ -1111,6 +1146,9 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev, MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, + adapter->board_id); + out: mthca_free_mailbox(dev, mailbox); return err; @@ -1121,7 +1159,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev, u8 *status) { struct mthca_mailbox *mailbox; - u32 *inbox; + __be32 *inbox; int err; #define INIT_HCA_IN_SIZE 0x200 @@ -1247,10 +1285,8 @@ int mthca_INIT_IB(struct mthca_dev *dev, #define INIT_IB_FLAG_SIG (1 << 18) #define INIT_IB_FLAG_NG (1 << 17) #define INIT_IB_FLAG_G0 (1 << 16) -#define INIT_IB_FLAG_1X (1 << 8) -#define INIT_IB_FLAG_4X (1 << 9) -#define INIT_IB_FLAG_12X (1 << 11) #define INIT_IB_VL_SHIFT 4 +#define INIT_IB_PORT_WIDTH_SHIFT 8 #define INIT_IB_MTU_SHIFT 12 #define INIT_IB_MAX_GID_OFFSET 0x06 #define INIT_IB_MAX_PKEY_OFFSET 0x0a @@ -1266,12 +1302,11 @@ int mthca_INIT_IB(struct mthca_dev *dev, memset(inbox, 0, INIT_IB_IN_SIZE); flags = 0; - flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0; - flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0; flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; flags |= param->set_si_guid ? 
INIT_IB_FLAG_SIG : 0; flags |= param->vl_cap << INIT_IB_VL_SHIFT; + flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT; flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); @@ -1342,7 +1377,7 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) { struct mthca_mailbox *mailbox; - u64 *inbox; + __be64 *inbox; int err; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); @@ -1468,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, CMD_TIME_CLASS_A, status); } +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, + CMD_HW2SW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) +{ + return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, + CMD_TIME_CLASS_B, status); +} + int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, int is_ee, struct mthca_mailbox *mailbox, u32 optmask, u8 *status) @@ -1513,7 +1569,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, if (i % 8 == 0) printk(" [%02x] ", i * 4); printk(" %08x", - be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } @@ -1533,7 +1589,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, if (i % 8 == 0) printk("[%02x] ", i * 4); printk(" %08x", - be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); if ((i + 1) % 8 == 0) printk("\n"); } diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index ed517f175dd6..65f976a13e02 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -35,7 +36,7 @@ #ifndef MTHCA_CMD_H #define MTHCA_CMD_H -#include <ib_verbs.h> +#include <rdma/ib_verbs.h> #define MTHCA_MAILBOX_SIZE 4096 @@ -183,10 +184,11 @@ struct mthca_dev_lim { }; struct mthca_adapter { - u32 vendor_id; - u32 device_id; - u32 revision_id; - u8 inta_pin; + u32 vendor_id; + u32 device_id; + u32 revision_id; + char board_id[MTHCA_BOARD_ID_LEN]; + u8 inta_pin; }; struct mthca_init_hca_param { @@ -218,8 +220,7 @@ struct mthca_init_hca_param { }; struct mthca_init_ib_param { - int enable_1x; - int enable_4x; + int port_width; int vl_cap; int mtu_cap; u16 gid_cap; @@ -297,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status); int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int cq_num, u8 *status); +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status); int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, int is_ee, struct mthca_mailbox *mailbox, u32 optmask, u8 *status); diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h index b4bfbbfe2c3d..afa56bfaab2e 100644 --- a/drivers/infiniband/hw/mthca/mthca_config_reg.h +++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 5687c3014522..8600b6c3e0c2 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -2,6 +2,8 @@ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -37,7 +39,7 @@ #include <linux/init.h> #include <linux/hardirq.h> -#include <ib_pack.h> +#include <rdma/ib_pack.h> #include "mthca_dev.h" #include "mthca_cmd.h" @@ -55,21 +57,21 @@ enum { * Must be packed because start is 64 bits but only aligned to 32 bits. 
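* Without the packed attribute the compiler would pad the 64-bit 'start' field to its natural 8-byte alignment, shifting every later field and breaking the layout the HCA expects.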
*/ struct mthca_cq_context { - u32 flags; - u64 start; - u32 logsize_usrpage; - u32 error_eqn; /* Tavor only */ - u32 comp_eqn; - u32 pd; - u32 lkey; - u32 last_notified_index; - u32 solicit_producer_index; - u32 consumer_index; - u32 producer_index; - u32 cqn; - u32 ci_db; /* Arbel only */ - u32 state_db; /* Arbel only */ - u32 reserved; + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 error_eqn; /* Tavor only */ + __be32 comp_eqn; + __be32 pd; + __be32 lkey; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + __be32 cqn; + __be32 ci_db; /* Arbel only */ + __be32 state_db; /* Arbel only */ + u32 reserved; } __attribute__((packed)); #define MTHCA_CQ_STATUS_OK ( 0 << 28) @@ -108,31 +110,31 @@ enum { }; struct mthca_cqe { - u32 my_qpn; - u32 my_ee; - u32 rqpn; - u16 sl_g_mlpath; - u16 rlid; - u32 imm_etype_pkey_eec; - u32 byte_cnt; - u32 wqe; - u8 opcode; - u8 is_send; - u8 reserved; - u8 owner; + __be32 my_qpn; + __be32 my_ee; + __be32 rqpn; + __be16 sl_g_mlpath; + __be16 rlid; + __be32 imm_etype_pkey_eec; + __be32 byte_cnt; + __be32 wqe; + u8 opcode; + u8 is_send; + u8 reserved; + u8 owner; }; struct mthca_err_cqe { - u32 my_qpn; - u32 reserved1[3]; - u8 syndrome; - u8 reserved2; - u16 db_cnt; - u32 reserved3; - u32 wqe; - u8 opcode; - u8 reserved4[2]; - u8 owner; + __be32 my_qpn; + u32 reserved1[3]; + u8 syndrome; + u8 reserved2; + __be16 db_cnt; + u32 reserved3; + __be32 wqe; + u8 opcode; + u8 reserved4[2]; + u8 owner; }; #define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) @@ -191,7 +193,7 @@ static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr) static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, int incr) { - u32 doorbell[2]; + __be32 doorbell[2]; if (mthca_is_memfree(dev)) { *cq->set_ci_db = cpu_to_be32(cq->cons_index); @@ -222,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } -void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq) { struct mthca_cq *cq; struct mthca_cqe *cqe; @@ -263,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) */ while (prod_index > cq->cons_index) { cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); - if (cqe->my_qpn == cpu_to_be32(qpn)) + if (cqe->my_qpn == cpu_to_be32(qpn)) { + if (srq) + mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); ++nfreed; + } else if (nfreed) memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & cq->ibcq.cqe), @@ -291,7 +297,7 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, { int err; int dbd; - u32 new_wqe; + __be32 new_wqe; if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { mthca_dbg(dev, "local QP operation err " @@ -365,6 +371,13 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, break; } + /* + * Mem-free HCAs always generate one CQE per WQE, even in the + * error case, so we don't have to check the doorbell count, etc. + */ + if (mthca_is_memfree(dev)) + return 0; + err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); if (err) return err; @@ -373,12 +386,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, * If we're at the end of the WQE chain, or we've used up our * doorbell count, free the CQE. Otherwise just update it for * the next poll operation. 
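* (Here "update" appears to mean folding the dbd count returned by mthca_free_err_wqe() into db_cnt so the error walk resumes at the correct WQE on the next poll.)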
- * - * This does not apply to mem-free HCAs: they don't use the - * doorbell count field, and so we should always free the CQE. */ - if (mthca_is_memfree(dev) || - !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) + if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) return 0; cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd); @@ -450,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev, >> wq->wqe_shift); entry->wr_id = (*cur_qp)->wrid[wqe_index + (*cur_qp)->rq.max]; + } else if ((*cur_qp)->ibqp.srq) { + struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); + u32 wqe = be32_to_cpu(cqe->wqe); + wq = NULL; + wqe_index = wqe >> srq->wqe_shift; + entry->wr_id = srq->wrid[wqe_index]; + mthca_free_srq_wqe(srq, wqe); } else { wq = &(*cur_qp)->rq; wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; entry->wr_id = (*cur_qp)->wrid[wqe_index]; } - if (wq->last_comp < wqe_index) - wq->tail += wqe_index - wq->last_comp; - else - wq->tail += wqe_index + wq->max - wq->last_comp; - - wq->last_comp = wqe_index; + if (wq) { + if (wq->last_comp < wqe_index) + wq->tail += wqe_index - wq->last_comp; + else + wq->tail += wqe_index + wq->max - wq->last_comp; - if (0) - mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n", - is_send ? "Send" : "Receive", - (*cur_qp)->qpn, wqe_index, wq->max); + wq->last_comp = wqe_index; + } if (is_error) { err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, @@ -584,13 +597,13 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : MTHCA_TAVOR_CQ_DB_REQ_NOT) | to_mcq(cq)->cqn); - doorbell[1] = 0xffffffff; + doorbell[1] = (__force __be32) 0xffffffff; mthca_write64(doorbell, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, @@ -602,9 +615,9 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) { struct mthca_cq *cq = to_mcq(ibcq); - u32 doorbell[2]; + __be32 doorbell[2]; u32 sn; - u32 ci; + __be32 ci; sn = cq->arm_sn & 3; ci = cpu_to_be32(cq->cons_index); @@ -637,113 +650,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) { - int i; - int size; - - if (cq->is_direct) - dma_free_coherent(&dev->pdev->dev, - (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, - cq->queue.direct.buf, - pci_unmap_addr(&cq->queue.direct, - mapping)); - else { - size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE; - for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) - if (cq->queue.page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - cq->queue.page_list[i].buf, - pci_unmap_addr(&cq->queue.page_list[i], - mapping)); - - kfree(cq->queue.page_list); - } -} - -static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size, - struct mthca_cq *cq) -{ - int err = -ENOMEM; - int npages, shift; - u64 *dma_list = NULL; - dma_addr_t t; - int i; - - if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) { - cq->is_direct = 1; - npages = 1; - shift = get_order(size) + PAGE_SHIFT; - - cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, - size, &t, GFP_KERNEL); - if (!cq->queue.direct.buf) - return -ENOMEM; - - pci_unmap_addr_set(&cq->queue.direct, mapping, t); - - memset(cq->queue.direct.buf, 0, size); - - while (t & ((1 << shift) - 1)) { - --shift; - npages *= 2; - } - - dma_list = 
kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - goto err_free; - - for (i = 0; i < npages; ++i) - dma_list[i] = t + i * (1 << shift); - } else { - cq->is_direct = 0; - npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; - shift = PAGE_SHIFT; - - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - return -ENOMEM; - - cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list, - GFP_KERNEL); - if (!cq->queue.page_list) - goto err_out; - - for (i = 0; i < npages; ++i) - cq->queue.page_list[i].buf = NULL; - - for (i = 0; i < npages; ++i) { - cq->queue.page_list[i].buf = - dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL); - if (!cq->queue.page_list[i].buf) - goto err_free; - - dma_list[i] = t; - pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t); - - memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE); - } - } - - err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, - dma_list, shift, npages, - 0, size, - MTHCA_MPT_FLAG_LOCAL_WRITE | - MTHCA_MPT_FLAG_LOCAL_READ, - &cq->mr); - if (err) - goto err_free; - - kfree(dma_list); - - return 0; - -err_free: - mthca_free_cq_buf(dev, cq); - -err_out: - kfree(dma_list); - - return err; + mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, + &cq->queue, cq->is_direct, &cq->mr); } int mthca_init_cq(struct mthca_dev *dev, int nent, @@ -795,7 +703,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, cq_context = mailbox->buf; if (cq->is_kernel) { - err = mthca_alloc_cq_buf(dev, size, cq); + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE, + &cq->queue, &cq->is_direct, + &dev->driver_pd, 1, &cq->mr); if (err) goto err_out_mailbox; @@ -811,7 +721,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | MTHCA_CQ_STATE_DISARMED | MTHCA_CQ_FLAG_TR); - cq_context->start = cpu_to_be64(0); cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); if (ctx) cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); @@ -857,10 +766,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, return 0; err_out_free_mr: - if (cq->is_kernel) { - mthca_free_mr(dev, &cq->mr); + if (cq->is_kernel) mthca_free_cq_buf(dev, cq); - } err_out_mailbox: mthca_free_mailbox(dev, mailbox); @@ -904,7 +811,7 @@ void mthca_free_cq(struct mthca_dev *dev, mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); if (0) { - u32 *ctx = mailbox->buf; + __be32 *ctx = mailbox->buf; int j; printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", @@ -928,7 +835,6 @@ void mthca_free_cq(struct mthca_dev *dev, wait_event(cq->wait, !atomic_read(&cq->refcount)); if (cq->is_kernel) { - mthca_free_mr(dev, &cq->mr); mthca_free_cq_buf(dev, cq); if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 5ecdd2eeeb0f..7bff5a8425f4 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -2,6 +2,8 @@ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -67,6 +69,10 @@ enum { }; enum { + MTHCA_BOARD_ID_LEN = 64 +}; + +enum { MTHCA_EQ_CONTEXT_SIZE = 0x40, MTHCA_CQ_CONTEXT_SIZE = 0x40, MTHCA_QP_CONTEXT_SIZE = 0x200, @@ -142,6 +148,7 @@ struct mthca_limits { int reserved_mcgs; int num_pds; int reserved_pds; + u8 port_width_cap; }; struct mthca_alloc { @@ -211,6 +218,13 @@ struct mthca_cq_table { struct mthca_icm_table *table; }; +struct mthca_srq_table { + struct mthca_alloc alloc; + spinlock_t lock; + struct mthca_array srq; + struct mthca_icm_table *table; +}; + struct mthca_qp_table { struct mthca_alloc alloc; u32 rdb_base; @@ -246,6 +260,7 @@ struct mthca_dev { unsigned long device_cap_flags; u32 rev_id; + char board_id[MTHCA_BOARD_ID_LEN]; /* firmware info */ u64 fw_ver; @@ -291,6 +306,7 @@ struct mthca_dev { struct mthca_mr_table mr_table; struct mthca_eq_table eq_table; struct mthca_cq_table cq_table; + struct mthca_srq_table srq_table; struct mthca_qp_table qp_table; struct mthca_av_table av_table; struct mthca_mcg_table mcg_table; @@ -331,14 +347,13 @@ extern void __buggy_use_of_MTHCA_PUT(void); #define MTHCA_PUT(dest, source, offset) \ do { \ - __typeof__(source) *__p = \ - (__typeof__(source) *) ((char *) (dest) + (offset)); \ + void *__d = ((char *) (dest) + (offset)); \ switch (sizeof(source)) { \ - case 1: *__p = (source); break; \ - case 2: *__p = cpu_to_be16(source); break; \ - case 4: *__p = cpu_to_be32(source); break; \ - case 8: *__p = cpu_to_be64(source); break; \ - default: __buggy_use_of_MTHCA_PUT(); \ + case 1: *(u8 *) __d = (source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ + default: __buggy_use_of_MTHCA_PUT(); \ } \ } while (0) @@ -354,12 +369,18 @@ int mthca_array_set(struct mthca_array *array, int index, void *value); void mthca_array_clear(struct mthca_array *array, int index); int mthca_array_init(struct mthca_array *array, int nent); void mthca_array_cleanup(struct mthca_array *array, int nent); +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr); +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr); int mthca_init_uar_table(struct mthca_dev *dev); int mthca_init_pd_table(struct mthca_dev *dev); int mthca_init_mr_table(struct mthca_dev *dev); int mthca_init_eq_table(struct mthca_dev *dev); int mthca_init_cq_table(struct mthca_dev *dev); +int mthca_init_srq_table(struct mthca_dev *dev); int mthca_init_qp_table(struct mthca_dev *dev); int mthca_init_av_table(struct mthca_dev *dev); int mthca_init_mcg_table(struct mthca_dev *dev); @@ -369,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev); void mthca_cleanup_mr_table(struct mthca_dev *dev); void mthca_cleanup_eq_table(struct mthca_dev *dev); void mthca_cleanup_cq_table(struct mthca_dev *dev); +void mthca_cleanup_srq_table(struct mthca_dev *dev); void mthca_cleanup_qp_table(struct mthca_dev *dev); void mthca_cleanup_av_table(struct mthca_dev *dev); void mthca_cleanup_mcg_table(struct mthca_dev *dev); @@ -419,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, void mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq); void mthca_cq_event(struct mthca_dev *dev, u32 cqn); -void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn); +void mthca_cq_clean(struct 
mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq); + +int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, + struct ib_srq_attr *attr, struct mthca_srq *srq); +void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); +void mthca_srq_event(struct mthca_dev *dev, u32 srqn, + enum ib_event_type event_type); +void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr); +int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); void mthca_qp_event(struct mthca_dev *dev, u32 qpn, enum ib_event_type event_type); @@ -433,7 +467,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr); int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, - int index, int *dbd, u32 *new_wqe); + int index, int *dbd, __be32 *new_wqe); int mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h index 535fad7710fb..dd9a44d170c9 100644 --- a/drivers/infiniband/hw/mthca/mthca_doorbell.h +++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -57,13 +58,13 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest) __raw_writeq((__force u64) val, dest); } -static inline void mthca_write64(u32 val[2], void __iomem *dest, +static inline void mthca_write64(__be32 val[2], void __iomem *dest, spinlock_t *doorbell_lock) { __raw_writeq(*(u64 *) val, dest); } -static inline void mthca_write_db_rec(u32 val[2], u32 *db) +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) { *(u64 *) db = *(u64 *) val; } @@ -86,18 +87,18 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest) __raw_writel(((__force u32 *) &val)[1], dest + 4); } -static inline void mthca_write64(u32 val[2], void __iomem *dest, +static inline void mthca_write64(__be32 val[2], void __iomem *dest, spinlock_t *doorbell_lock) { unsigned long flags; spin_lock_irqsave(doorbell_lock, flags); - __raw_writel(val[0], dest); - __raw_writel(val[1], dest + 4); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); spin_unlock_irqrestore(doorbell_lock, flags); } -static inline void mthca_write_db_rec(u32 val[2], u32 *db) +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) { db[0] = val[0]; wmb(); diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index cbcf2b4722e4..18f0981eb0c1 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -51,18 +52,18 @@ enum { * Must be packed because start is 64 bits but only aligned to 32 bits. */ struct mthca_eq_context { - u32 flags; - u64 start; - u32 logsize_usrpage; - u32 tavor_pd; /* reserved for Arbel */ - u8 reserved1[3]; - u8 intr; - u32 arbel_pd; /* lost_count for Tavor */ - u32 lkey; - u32 reserved2[2]; - u32 consumer_index; - u32 producer_index; - u32 reserved3[4]; + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 tavor_pd; /* reserved for Arbel */ + u8 reserved1[3]; + u8 intr; + __be32 arbel_pd; /* lost_count for Tavor */ + __be32 lkey; + u32 reserved2[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved3[4]; } __attribute__((packed)); #define MTHCA_EQ_STATUS_OK ( 0 << 28) @@ -127,28 +128,28 @@ struct mthca_eqe { union { u32 raw[6]; struct { - u32 cqn; + __be32 cqn; } __attribute__((packed)) comp; struct { - u16 reserved1; - u16 token; - u32 reserved2; - u8 reserved3[3]; - u8 status; - u64 out_param; + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; } __attribute__((packed)) cmd; struct { - u32 qpn; + __be32 qpn; } __attribute__((packed)) qp; struct { - u32 cqn; - u32 reserved1; - u8 reserved2[3]; - u8 syndrome; + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; } __attribute__((packed)) cq_err; struct { - u32 reserved1[2]; - u32 port; + u32 reserved1[2]; + __be32 port; } __attribute__((packed)) port_change; } event; u8 reserved3[3]; @@ -167,7 +168,7 @@ static inline u64 async_mask(struct mthca_dev *dev) static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn); doorbell[1] = cpu_to_be32(ci & (eq->nent - 1)); @@ -190,8 +191,8 @@ static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u { /* See comment in tavor_set_eq_ci() above. */ wmb(); - __raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base + - eq->eqn * 8); + __raw_writel((__force u32) cpu_to_be32(ci), + dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); /* We still want ordering, just not swabbing, so add a barrier */ mb(); } @@ -206,7 +207,7 @@ static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn); doorbell[1] = 0; @@ -224,7 +225,7 @@ static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask) static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) { if (!mthca_is_memfree(dev)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn); doorbell[1] = cpu_to_be32(cqn); diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 7df223642015..9804174f7f3c 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -1,5 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -32,9 +34,9 @@ * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $ */ -#include <ib_verbs.h> -#include <ib_mad.h> -#include <ib_smi.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_mad.h> +#include <rdma/ib_smi.h> #include "mthca_dev.h" #include "mthca_cmd.h" @@ -192,7 +194,7 @@ int mthca_process_mad(struct ib_device *ibdev, { int err; u8 status; - u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE; + u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); /* Forward locally generated traps to the SM */ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 2ef916859e17..3241d6c9dc11 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -34,7 +35,6 @@ */ #include <linux/config.h> -#include <linux/version.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> @@ -171,6 +171,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim mdev->limits.reserved_mrws = dev_lim->reserved_mrws; mdev->limits.reserved_uars = dev_lim->reserved_uars; mdev->limits.reserved_pds = dev_lim->reserved_pds; + mdev->limits.port_width_cap = dev_lim->max_port_width; /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. May be doable since hardware supports it for SRQ. 
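A minimal, hypothetical consumer-side sketch (not part of this patch; ibdev is an assumed struct ib_device pointer): since mthca never advertises IB_DEVICE_RESIZE_MAX_WR, a verbs client should test the capability bit before relying on work-queue resizing:

	struct ib_device_attr attr;

	if (!ib_query_device(ibdev, &attr) &&
	    (attr.device_cap_flags & IB_DEVICE_RESIZE_MAX_WR)) {
		/* only then grow qp_attr.cap and pass IB_QP_CAP to
		 * ib_modify_qp(); on mthca this branch is never taken */
	}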
@@ -212,7 +213,6 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev) struct mthca_dev_lim dev_lim; struct mthca_profile profile; struct mthca_init_hca_param init_hca; - struct mthca_adapter adapter; err = mthca_SYS_EN(mdev, &status); if (err) { @@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev) profile = default_profile; profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.uarc_size = 0; + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); if (err < 0) @@ -270,26 +272,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev) goto err_disable; } - err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); - if (err) { - mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); - goto err_close; - } - if (status) { - mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " - "aborting.\n", status); - err = -EINVAL; - goto err_close; - } - - mdev->eq_table.inta_pin = adapter.inta_pin; - mdev->rev_id = adapter.revision_id; - return 0; -err_close: - mthca_CLOSE_HCA(mdev, 0, &status); - err_disable: mthca_SYS_DIS(mdev, &status); @@ -442,15 +426,29 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev, } mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, - dev_lim->cqc_entry_sz, - mdev->limits.num_cqs, - mdev->limits.reserved_cqs, 0); + dev_lim->cqc_entry_sz, + mdev->limits.num_cqs, + mdev->limits.reserved_cqs, 0); if (!mdev->cq_table.table) { mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); err = -ENOMEM; goto err_unmap_rdb; } + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { + mdev->srq_table.table = + mthca_alloc_icm_table(mdev, init_hca->srqc_base, + dev_lim->srq_entry_sz, + mdev->limits.num_srqs, + mdev->limits.reserved_srqs, 0); + if (!mdev->srq_table.table) { + mthca_err(mdev, "Failed to map SRQ context memory, " + "aborting.\n"); + err = -ENOMEM; + goto err_unmap_cq; + } + } + /* * It's not strictly required, but for simplicity just map the * whole multicast group table now. 
The table isn't very big @@ -466,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev, if (!mdev->mcg_table.table) { mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); err = -ENOMEM; - goto err_unmap_cq; + goto err_unmap_srq; } return 0; +err_unmap_srq: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + err_unmap_cq: mthca_free_icm_table(mdev, mdev->cq_table.table); @@ -506,7 +508,6 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) struct mthca_dev_lim dev_lim; struct mthca_profile profile; struct mthca_init_hca_param init_hca; - struct mthca_adapter adapter; u64 icm_size; u8 status; int err; @@ -551,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) profile = default_profile; profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.num_udav = 0; + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); if ((int) icm_size < 0) { @@ -574,24 +577,11 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) goto err_free_icm; } - err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); - if (err) { - mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); - goto err_free_icm; - } - if (status) { - mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " - "aborting.\n", status); - err = -EINVAL; - goto err_free_icm; - } - - mdev->eq_table.inta_pin = adapter.inta_pin; - mdev->rev_id = adapter.revision_id; - return 0; err_free_icm: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); mthca_free_icm_table(mdev, mdev->cq_table.table); mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); @@ -614,12 +604,70 @@ err_disable: return err; } +static void mthca_close_hca(struct mthca_dev *mdev) +{ + u8 status; + + mthca_CLOSE_HCA(mdev, 0, &status); + + if (mthca_is_memfree(mdev)) { + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + mthca_free_icm_table(mdev, mdev->cq_table.table); + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + mthca_unmap_eq_icm(mdev); + + mthca_UNMAP_ICM_AUX(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + + mthca_UNMAP_FA(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + + if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) + mthca_DISABLE_LAM(mdev, &status); + } else + mthca_SYS_DIS(mdev, &status); +} + static int __devinit mthca_init_hca(struct mthca_dev *mdev) { + u8 status; + int err; + struct mthca_adapter adapter; + if (mthca_is_memfree(mdev)) - return mthca_init_arbel(mdev); + err = mthca_init_arbel(mdev); else - return mthca_init_tavor(mdev); + err = mthca_init_tavor(mdev); + + if (err) + return err; + + err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); + if (err) { + mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); + goto err_close; + } + if (status) { + mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " + "aborting.\n", status); + err = -EINVAL; + goto err_close; + } + + mdev->eq_table.inta_pin = adapter.inta_pin; + mdev->rev_id = adapter.revision_id; + memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); + + return 0; + +err_close: + 
mthca_close_hca(mdev); + return err; } static int __devinit mthca_setup_hca(struct mthca_dev *dev) @@ -709,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev) goto err_cmd_poll; } + err = mthca_init_srq_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "shared receive queue table, aborting.\n"); + goto err_cq_table_free; + } + err = mthca_init_qp_table(dev); if (err) { mthca_err(dev, "Failed to initialize " "queue pair table, aborting.\n"); - goto err_cq_table_free; + goto err_srq_table_free; } err = mthca_init_av_table(dev); @@ -738,6 +793,9 @@ err_av_table_free: err_qp_table_free: mthca_cleanup_qp_table(dev); +err_srq_table_free: + mthca_cleanup_srq_table(dev); + err_cq_table_free: mthca_cleanup_cq_table(dev); @@ -844,33 +902,6 @@ static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev) return 0; } -static void mthca_close_hca(struct mthca_dev *mdev) -{ - u8 status; - - mthca_CLOSE_HCA(mdev, 0, &status); - - if (mthca_is_memfree(mdev)) { - mthca_free_icm_table(mdev, mdev->cq_table.table); - mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); - mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); - mthca_free_icm_table(mdev, mdev->qp_table.qp_table); - mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); - mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); - mthca_unmap_eq_icm(mdev); - - mthca_UNMAP_ICM_AUX(mdev, &status); - mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); - - mthca_UNMAP_FA(mdev, &status); - mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); - - if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) - mthca_DISABLE_LAM(mdev, &status); - } else - mthca_SYS_DIS(mdev, &status); -} - /* Types of supported HCA */ enum { TAVOR, /* MT23108 */ @@ -887,9 +918,9 @@ static struct { int is_memfree; int is_pcie; } mthca_hca_table[] = { - [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 2), .is_memfree = 0, .is_pcie = 0 }, - [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 6, 2), .is_memfree = 0, .is_pcie = 1 }, - [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 0, 1), .is_memfree = 1, .is_pcie = 1 }, + [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 3), .is_memfree = 0, .is_pcie = 0 }, + [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 0), .is_memfree = 0, .is_pcie = 1 }, + [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .is_memfree = 1, .is_pcie = 1 }, [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 } }; @@ -1051,6 +1082,7 @@ err_cleanup: mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); @@ -1100,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev) mthca_cleanup_mcg_table(mdev); mthca_cleanup_av_table(mdev); mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); mthca_cleanup_cq_table(mdev); mthca_cmd_use_polling(mdev); mthca_cleanup_eq_table(mdev); diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c index 5be7d949dbf6..a2707605f4c8 100644 --- a/drivers/infiniband/hw/mthca/mthca_mcg.c +++ b/drivers/infiniband/hw/mthca/mthca_mcg.c @@ -42,10 +42,10 @@ enum { }; struct mthca_mgm { - u32 next_gid_index; - u32 reserved[3]; - u8 gid[16]; - u32 qp[MTHCA_QP_PER_MGM]; + __be32 next_gid_index; + u32 reserved[3]; + u8 gid[16]; + __be32 qp[MTHCA_QP_PER_MGM]; }; static const u8 zero_gid[16]; /* automatically initialized to 0 */ @@ -94,10 +94,14 @@ static int find_mgm(struct mthca_dev *dev, if (0) 
mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" "%04x:%04x:%04x:%04x is %04x\n", - be16_to_cpu(((u16 *) gid)[0]), be16_to_cpu(((u16 *) gid)[1]), - be16_to_cpu(((u16 *) gid)[2]), be16_to_cpu(((u16 *) gid)[3]), - be16_to_cpu(((u16 *) gid)[4]), be16_to_cpu(((u16 *) gid)[5]), - be16_to_cpu(((u16 *) gid)[6]), be16_to_cpu(((u16 *) gid)[7]), + be16_to_cpu(((__be16 *) gid)[0]), + be16_to_cpu(((__be16 *) gid)[1]), + be16_to_cpu(((__be16 *) gid)[2]), + be16_to_cpu(((__be16 *) gid)[3]), + be16_to_cpu(((__be16 *) gid)[4]), + be16_to_cpu(((__be16 *) gid)[5]), + be16_to_cpu(((__be16 *) gid)[6]), + be16_to_cpu(((__be16 *) gid)[7]), *hash); *index = *hash; @@ -258,14 +262,14 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) if (index == -1) { mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " "not found\n", - be16_to_cpu(((u16 *) gid->raw)[0]), - be16_to_cpu(((u16 *) gid->raw)[1]), - be16_to_cpu(((u16 *) gid->raw)[2]), - be16_to_cpu(((u16 *) gid->raw)[3]), - be16_to_cpu(((u16 *) gid->raw)[4]), - be16_to_cpu(((u16 *) gid->raw)[5]), - be16_to_cpu(((u16 *) gid->raw)[6]), - be16_to_cpu(((u16 *) gid->raw)[7])); + be16_to_cpu(((__be16 *) gid->raw)[0]), + be16_to_cpu(((__be16 *) gid->raw)[1]), + be16_to_cpu(((__be16 *) gid->raw)[2]), + be16_to_cpu(((__be16 *) gid->raw)[3]), + be16_to_cpu(((__be16 *) gid->raw)[4]), + be16_to_cpu(((__be16 *) gid->raw)[5]), + be16_to_cpu(((__be16 *) gid->raw)[6]), + be16_to_cpu(((__be16 *) gid->raw)[7])); err = -EINVAL; goto out; } diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 2a8646150355..1827400f189b 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -285,6 +286,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, { struct mthca_icm_table *table; int num_icm; + unsigned chunk_size; int i; u8 status; @@ -305,7 +307,11 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, table->icm[i] = NULL; for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { - table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, + chunk_size = MTHCA_TABLE_CHUNK_SIZE; + if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size) + chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE; + + table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN); if (!table->icm[i]) @@ -481,7 +487,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, } } -int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db) +int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db) { int group; int start, end, dir; @@ -564,7 +570,7 @@ found: page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); - *db = (u32 *) &page->db_rec[j]; + *db = (__be32 *) &page->db_rec[j]; out: up(&dev->db_tab->mutex); diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h index 4761d844cb5f..bafa51544aa3 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.h +++ b/drivers/infiniband/hw/mthca/mthca_memfree.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -137,7 +138,7 @@ enum { struct mthca_db_page { DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); - u64 *db_rec; + __be64 *db_rec; dma_addr_t mapping; }; @@ -172,7 +173,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, int mthca_init_db_tab(struct mthca_dev *dev); void mthca_cleanup_db_tab(struct mthca_dev *dev); -int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db); +int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db); void mthca_free_db(struct mthca_dev *dev, int type, int db_index); #endif /* MTHCA_MEMFREE_H */ diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index cbe50feaf680..1f97a44477f5 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -50,18 +51,18 @@ struct mthca_mtt { * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
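 *
 * (Concretely: the fields before mtt_seg add up to 44 bytes, so an
 * unpacked layout would get 4 bytes of compiler padding before the
 * 64-bit mtt_seg to align it naturally, shifting mtt_sz and reserved
 * away from the offsets the HCA firmware expects.)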
*/ struct mthca_mpt_entry { - u32 flags; - u32 page_size; - u32 key; - u32 pd; - u64 start; - u64 length; - u32 lkey; - u32 window_count; - u32 window_count_limit; - u64 mtt_seg; - u32 mtt_sz; /* Arbel only */ - u32 reserved[2]; + __be32 flags; + __be32 page_size; + __be32 key; + __be32 pd; + __be64 start; + __be64 length; + __be32 lkey; + __be32 window_count; + __be32 window_count_limit; + __be64 mtt_seg; + __be32 mtt_sz; /* Arbel only */ + u32 reserved[2]; } __attribute__((packed)); #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) @@ -247,7 +248,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, int start_index, u64 *buffer_list, int list_len) { struct mthca_mailbox *mailbox; - u64 *mtt_entry; + __be64 *mtt_entry; int err = 0; u8 status; int i; @@ -389,7 +390,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); - printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); + printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); if ((i + 1) % 4 == 0) printk("\n"); } @@ -458,7 +459,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, static void mthca_free_region(struct mthca_dev *dev, u32 lkey) { mthca_table_put(dev, dev->mr_table.mpt_table, - arbel_key_to_hw_index(lkey)); + key_to_hw_index(dev, lkey)); mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); } @@ -562,7 +563,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { if (i % 4 == 0) printk("[%02x] ", i * 4); - printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); + printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); if ((i + 1) % 4 == 0) printk("\n"); } @@ -669,7 +670,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size)); mpt_entry.start = cpu_to_be64(iova); - writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key); + __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key); memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, offsetof(struct mthca_mpt_entry, window_count) - offsetof(struct mthca_mpt_entry, start)); diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c index c2c899844e98..3dbf06a6e6f4 100644 --- a/drivers/infiniband/hw/mthca/mthca_pd.c +++ b/drivers/infiniband/hw/mthca/mthca_pd.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c index 4fedc32d5871..0576056b34f4 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.c +++ b/drivers/infiniband/hw/mthca/mthca_profile.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -101,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev, profile[MTHCA_RES_UARC].size = request->uarc_size; profile[MTHCA_RES_QP].num = request->num_qp; + profile[MTHCA_RES_SRQ].num = request->num_srq; profile[MTHCA_RES_EQP].num = request->num_qp; profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; profile[MTHCA_RES_CQ].num = request->num_cq; diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h index 17aef3357661..94641808f97f 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.h +++ b/drivers/infiniband/hw/mthca/mthca_profile.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -41,6 +42,7 @@ struct mthca_profile { int num_qp; int rdb_per_qp; + int num_srq; int num_cq; int num_mcg; int num_mpt; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 81919a7b4935..1c1c2e230871 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -2,6 +2,8 @@ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -34,7 +36,7 @@ * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $ */ -#include <ib_smi.h> +#include <rdma/ib_smi.h> #include <linux/mm.h> #include "mthca_dev.h" @@ -79,10 +81,10 @@ static int mthca_query_device(struct ib_device *ibdev, } props->device_cap_flags = mdev->device_cap_flags; - props->vendor_id = be32_to_cpup((u32 *) (out_mad->data + 36)) & + props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; - props->vendor_part_id = be16_to_cpup((u16 *) (out_mad->data + 30)); - props->hw_ver = be16_to_cpup((u16 *) (out_mad->data + 32)); + props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); + props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); memcpy(&props->node_guid, out_mad->data + 12, 8); @@ -118,6 +120,8 @@ static int mthca_query_port(struct ib_device *ibdev, if (!in_mad || !out_mad) goto out; + memset(props, 0, sizeof *props); + memset(in_mad, 0, sizeof *in_mad); in_mad->base_version = 1; in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; @@ -136,16 +140,17 @@ static int mthca_query_port(struct ib_device *ibdev, goto out; } - props->lid = be16_to_cpup((u16 *) (out_mad->data + 16)); + props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); props->lmc = out_mad->data[34] & 0x7; - props->sm_lid = be16_to_cpup((u16 *) (out_mad->data + 18)); + props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); props->sm_sl = out_mad->data[36] & 0xf; props->state = out_mad->data[32] & 0xf; props->phys_state = out_mad->data[33] >> 4; - props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20)); + props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); props->gid_tbl_len = 
to_mdev(ibdev)->limits.gid_table_len; + props->max_msg_sz = 0x80000000; props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; - props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48)); + props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; props->active_speed = out_mad->data[35] >> 4; @@ -221,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev, goto out; } - *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]); + *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); out: kfree(in_mad); @@ -420,6 +425,79 @@ static int mthca_ah_destroy(struct ib_ah *ah) return 0; } +static struct ib_srq *mthca_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mthca_create_srq ucmd; + struct mthca_ucontext *context = NULL; + struct mthca_srq *srq; + int err; + + srq = kmalloc(sizeof *srq, GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + if (pd->uobject) { + context = to_mucontext(pd->uobject->context); + + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { + err = -EFAULT; + goto err_free; + } + + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index, + ucmd.db_page); + + if (err) + goto err_free; + + srq->mr.ibmr.lkey = ucmd.lkey; + srq->db_index = ucmd.db_index; + } + + err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), + &init_attr->attr, srq); + + if (err && pd->uobject) + mthca_unmap_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index); + + if (err) + goto err_free; + + if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { + mthca_free_srq(to_mdev(pd->device), srq); + err = -EFAULT; + goto err_free; + } + + return &srq->ibsrq; + +err_free: + kfree(srq); + + return ERR_PTR(err); +} + +static int mthca_destroy_srq(struct ib_srq *srq) +{ + struct mthca_ucontext *context; + + if (srq->uobject) { + context = to_mucontext(srq->uobject->context); + + mthca_unmap_user_db(to_mdev(srq->device), &context->uar, + context->db_tab, to_msrq(srq)->db_index); + } + + mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); + kfree(srq); + + return 0; +} + static struct ib_qp *mthca_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) @@ -956,14 +1034,22 @@ static ssize_t show_hca(struct class_device *cdev, char *buf) } } +static ssize_t show_board(struct class_device *cdev, char *buf) +{ + struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev); + return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id); +} + static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); static struct class_device_attribute *mthca_class_attributes[] = { &class_device_attr_hw_rev, &class_device_attr_fw_ver, - &class_device_attr_hca_type + &class_device_attr_hca_type, + &class_device_attr_board_id }; int mthca_register_device(struct mthca_dev *dev) @@ -990,6 +1076,17 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.dealloc_pd = mthca_dealloc_pd; dev->ib_dev.create_ah = mthca_ah_create; dev->ib_dev.destroy_ah = mthca_ah_destroy; + + if (dev->mthca_flags & MTHCA_FLAG_SRQ) { + dev->ib_dev.create_srq = mthca_create_srq; + dev->ib_dev.destroy_srq = mthca_destroy_srq; + + if (mthca_is_memfree(dev)) + dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; +
else + dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; + } + dev->ib_dev.create_qp = mthca_create_qp; dev->ib_dev.modify_qp = mthca_modify_qp; dev->ib_dev.destroy_qp = mthca_destroy_qp; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 1d032791cc8b..bcd4b01a339c 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -36,8 +37,8 @@ #ifndef MTHCA_PROVIDER_H #define MTHCA_PROVIDER_H -#include <ib_verbs.h> -#include <ib_pack.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_pack.h> #define MTHCA_MPT_FLAG_ATOMIC (1 << 14) #define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) @@ -50,6 +51,11 @@ struct mthca_buf_list { DECLARE_PCI_UNMAP_ADDR(mapping) }; +union mthca_buf { + struct mthca_buf_list direct; + struct mthca_buf_list *page_list; +}; + struct mthca_uar { unsigned long pfn; int index; @@ -181,19 +187,39 @@ struct mthca_cq { /* Next fields are Arbel only */ int set_ci_db_index; - u32 *set_ci_db; + __be32 *set_ci_db; int arm_db_index; - u32 *arm_db; + __be32 *arm_db; int arm_sn; - union { - struct mthca_buf_list direct; - struct mthca_buf_list *page_list; - } queue; + union mthca_buf queue; struct mthca_mr mr; wait_queue_head_t wait; }; +struct mthca_srq { + struct ib_srq ibsrq; + spinlock_t lock; + atomic_t refcount; + int srqn; + int max; + int max_gs; + int wqe_shift; + int first_free; + int last_free; + u16 counter; /* Arbel only */ + int db_index; /* Arbel only */ + __be32 *db; /* Arbel only */ + void *last; + + int is_direct; + u64 *wrid; + union mthca_buf queue; + struct mthca_mr mr; + + wait_queue_head_t wait; +}; + struct mthca_wq { spinlock_t lock; int max; @@ -206,7 +232,7 @@ struct mthca_wq { int wqe_shift; int db_index; /* Arbel only */ - u32 *db; + __be32 *db; }; struct mthca_qp { @@ -227,10 +253,7 @@ struct mthca_qp { int send_wqe_offset; u64 *wrid; - union { - struct mthca_buf_list direct; - struct mthca_buf_list *page_list; - } queue; + union mthca_buf queue; wait_queue_head_t wait; }; @@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) return container_of(ibcq, struct mthca_cq, ibcq); } +static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct mthca_srq, ibsrq); +} + static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) { return container_of(ibqp, struct mthca_qp, ibqp); diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index f7126b14d5ae..0164b84d4ec6 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1,6 +1,8 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -35,13 +37,14 @@ #include <linux/init.h> -#include <ib_verbs.h> -#include <ib_cache.h> -#include <ib_pack.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_cache.h> +#include <rdma/ib_pack.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" +#include "mthca_wqe.h" enum { MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, @@ -95,62 +98,62 @@ enum { }; struct mthca_qp_path { - u32 port_pkey; - u8 rnr_retry; - u8 g_mylmc; - u16 rlid; - u8 ackto; - u8 mgid_index; - u8 static_rate; - u8 hop_limit; - u32 sl_tclass_flowlabel; - u8 rgid[16]; + __be32 port_pkey; + u8 rnr_retry; + u8 g_mylmc; + __be16 rlid; + u8 ackto; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 rgid[16]; } __attribute__((packed)); struct mthca_qp_context { - u32 flags; - u32 tavor_sched_queue; /* Reserved on Arbel */ - u8 mtu_msgmax; - u8 rq_size_stride; /* Reserved on Tavor */ - u8 sq_size_stride; /* Reserved on Tavor */ - u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ - u32 usr_page; - u32 local_qpn; - u32 remote_qpn; - u32 reserved1[2]; + __be32 flags; + __be32 tavor_sched_queue; /* Reserved on Arbel */ + u8 mtu_msgmax; + u8 rq_size_stride; /* Reserved on Tavor */ + u8 sq_size_stride; /* Reserved on Tavor */ + u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ + __be32 usr_page; + __be32 local_qpn; + __be32 remote_qpn; + u32 reserved1[2]; struct mthca_qp_path pri_path; struct mthca_qp_path alt_path; - u32 rdd; - u32 pd; - u32 wqe_base; - u32 wqe_lkey; - u32 params1; - u32 reserved2; - u32 next_send_psn; - u32 cqn_snd; - u32 snd_wqe_base_l; /* Next send WQE on Tavor */ - u32 snd_db_index; /* (debugging only entries) */ - u32 last_acked_psn; - u32 ssn; - u32 params2; - u32 rnr_nextrecvpsn; - u32 ra_buff_indx; - u32 cqn_rcv; - u32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ - u32 rcv_db_index; /* (debugging only entries) */ - u32 qkey; - u32 srqn; - u32 rmsn; - u16 rq_wqe_counter; /* reserved on Tavor */ - u16 sq_wqe_counter; /* reserved on Tavor */ - u32 reserved3[18]; + __be32 rdd; + __be32 pd; + __be32 wqe_base; + __be32 wqe_lkey; + __be32 params1; + __be32 reserved2; + __be32 next_send_psn; + __be32 cqn_snd; + __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ + __be32 snd_db_index; /* (debugging only entries) */ + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 ra_buff_indx; + __be32 cqn_rcv; + __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ + __be32 rcv_db_index; /* (debugging only entries) */ + __be32 qkey; + __be32 srqn; + __be32 rmsn; + __be16 rq_wqe_counter; /* reserved on Tavor */ + __be16 sq_wqe_counter; /* reserved on Tavor */ + u32 reserved3[18]; } __attribute__((packed)); struct mthca_qp_param { - u32 opt_param_mask; - u32 reserved1; + __be32 opt_param_mask; + u32 reserved1; struct mthca_qp_context context; - u32 reserved2[62]; + u32 reserved2[62]; } __attribute__((packed)); enum { @@ -173,80 +176,6 @@ enum { MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 }; -enum { - MTHCA_NEXT_DBD = 1 << 7, - MTHCA_NEXT_FENCE = 1 << 6, - MTHCA_NEXT_CQ_UPDATE = 1 << 3, - MTHCA_NEXT_EVENT_GEN = 1 << 2, - MTHCA_NEXT_SOLICIT = 1 << 1, - - MTHCA_MLX_VL15 = 1 << 17, - MTHCA_MLX_SLR = 1 << 16 -}; - -enum { - MTHCA_INVAL_LKEY = 0x100 -}; - -struct mthca_next_seg { - u32 nda_op; /* [31:6] next WQE [4:0] next opcode */ - u32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */ - u32 flags; /* [3] CQ [2] Event [1] Solicit */ - u32 imm; /* immediate data */ -}; - -struct 
mthca_tavor_ud_seg { - u32 reserved1; - u32 lkey; - u64 av_addr; - u32 reserved2[4]; - u32 dqpn; - u32 qkey; - u32 reserved3[2]; -}; - -struct mthca_arbel_ud_seg { - u32 av[8]; - u32 dqpn; - u32 qkey; - u32 reserved[2]; -}; - -struct mthca_bind_seg { - u32 flags; /* [31] Atomic [30] rem write [29] rem read */ - u32 reserved; - u32 new_rkey; - u32 lkey; - u64 addr; - u64 length; -}; - -struct mthca_raddr_seg { - u64 raddr; - u32 rkey; - u32 reserved; -}; - -struct mthca_atomic_seg { - u64 swap_add; - u64 compare; -}; - -struct mthca_data_seg { - u32 byte_count; - u32 lkey; - u64 addr; -}; - -struct mthca_mlx_seg { - u32 nda_op; - u32 nds; - u32 flags; /* [17] VL15 [16] SLR [14:12] static rate - [11:8] SL [3] C [2] E */ - u16 rlid; - u16 vcrc; -}; - static const u8 mthca_opcode[] = { [IB_WR_SEND] = MTHCA_OPCODE_SEND, [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, @@ -573,12 +502,11 @@ static void init_port(struct mthca_dev *dev, int port) memset(¶m, 0, sizeof param); - param.enable_1x = 1; - param.enable_4x = 1; - param.vl_cap = dev->limits.vl_cap; - param.mtu_cap = dev->limits.mtu_cap; - param.gid_cap = dev->limits.gid_table_len; - param.pkey_cap = dev->limits.pkey_table_len; + param.port_width = dev->limits.port_width_cap; + param.vl_cap = dev->limits.vl_cap; + param.mtu_cap = dev->limits.mtu_cap; + param.gid_cap = dev->limits.gid_table_len; + param.pkey_cap = dev->limits.pkey_table_len; err = mthca_INIT_IB(dev, ¶m, port, &status); if (err) @@ -684,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; if (mthca_is_memfree(dev)) { - qp_context->rq_size_stride = - ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4); - qp_context->sq_size_stride = - ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4); + if (qp->rq.max) + qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; + qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; + + if (qp->sq.max) + qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; + qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; } /* leave arbel_sched_queue as 0 */ @@ -856,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); + if (ibqp->srq) + qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); + if (attr_mask & IB_QP_MIN_RNR_TIMER) { qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); @@ -878,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); } + if (ibqp->srq) + qp_context->srqn = cpu_to_be32(1 << 24 | + to_msrq(ibqp->srq)->srqn); + err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, qp->qpn, 0, mailbox, 0, &status); if (status) { @@ -925,10 +863,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) { int size; - int i; - int npages, shift; - dma_addr_t t; - u64 *dma_list = NULL; int err = -ENOMEM; size = sizeof (struct mthca_next_seg) + @@ -978,116 +912,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, if (!qp->wrid) goto err_out; - if (size <= MTHCA_MAX_DIRECT_QP_SIZE) { - qp->is_direct = 1; - npages = 1; - shift = get_order(size) + PAGE_SHIFT; - - if (0) - mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n", - size, shift); - - qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size, - &t, GFP_KERNEL); - if 
(!qp->queue.direct.buf) - goto err_out; - - pci_unmap_addr_set(&qp->queue.direct, mapping, t); - - memset(qp->queue.direct.buf, 0, size); - - while (t & ((1 << shift) - 1)) { - --shift; - npages *= 2; - } - - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - goto err_out_free; - - for (i = 0; i < npages; ++i) - dma_list[i] = t + i * (1 << shift); - } else { - qp->is_direct = 0; - npages = size / PAGE_SIZE; - shift = PAGE_SHIFT; - - if (0) - mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages); - - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); - if (!dma_list) - goto err_out; - - qp->queue.page_list = kmalloc(npages * - sizeof *qp->queue.page_list, - GFP_KERNEL); - if (!qp->queue.page_list) - goto err_out; - - for (i = 0; i < npages; ++i) { - qp->queue.page_list[i].buf = - dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL); - if (!qp->queue.page_list[i].buf) - goto err_out_free; - - memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE); - - pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t); - dma_list[i] = t; - } - } - - err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift, - npages, 0, size, - MTHCA_MPT_FLAG_LOCAL_READ, - &qp->mr); + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, + &qp->queue, &qp->is_direct, pd, 0, &qp->mr); if (err) - goto err_out_free; + goto err_out; - kfree(dma_list); return 0; - err_out_free: - if (qp->is_direct) { - dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf, - pci_unmap_addr(&qp->queue.direct, mapping)); - } else - for (i = 0; i < npages; ++i) { - if (qp->queue.page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - qp->queue.page_list[i].buf, - pci_unmap_addr(&qp->queue.page_list[i], - mapping)); - - } - - err_out: +err_out: kfree(qp->wrid); - kfree(dma_list); return err; } static void mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) { - int i; - int size = PAGE_ALIGN(qp->send_wqe_offset + - (qp->sq.max << qp->sq.wqe_shift)); - - if (qp->is_direct) { - dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf, - pci_unmap_addr(&qp->queue.direct, mapping)); - } else { - for (i = 0; i < size / PAGE_SIZE; ++i) { - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - qp->queue.page_list[i].buf, - pci_unmap_addr(&qp->queue.page_list[i], - mapping)); - } - } - + mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + + (qp->sq.max << qp->sq.wqe_shift)), + &qp->queue, qp->is_direct, &qp->mr); kfree(qp->wrid); } @@ -1428,11 +1270,12 @@ void mthca_free_qp(struct mthca_dev *dev, * unref the mem-free tables and free the QPN in our table. */ if (!qp->ibqp.uobject) { - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn); + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); - mthca_free_mr(dev, &qp->mr); mthca_free_memfree(dev, qp); mthca_free_wqe_buf(dev, qp); } @@ -1457,6 +1300,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, { int header_size; int err; + u16 pkey; ib_ud_header_init(256, /* assume a MAD */ sqp->ud_header.grh_present, @@ -1467,8 +1311,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, return err; mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? 
MTHCA_MLX_VL15 : 0) | - (sqp->ud_header.lrh.destination_lid == 0xffff ? - MTHCA_MLX_SLR : 0) | + (sqp->ud_header.lrh.destination_lid == + IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | (sqp->ud_header.lrh.service_level << 8)); mlx->rlid = sqp->ud_header.lrh.destination_lid; mlx->vcrc = 0; @@ -1488,18 +1332,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, } sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; - if (sqp->ud_header.lrh.destination_lid == 0xffff) - sqp->ud_header.lrh.source_lid = 0xffff; + if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) + sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) ib_get_cached_pkey(&dev->ib_dev, sqp->port, - sqp->pkey_index, - &sqp->ud_header.bth.pkey); + sqp->pkey_index, &pkey); else ib_get_cached_pkey(&dev->ib_dev, sqp->port, - wr->wr.ud.pkey_index, - &sqp->ud_header.bth.pkey); - cpu_to_be16s(&sqp->ud_header.bth.pkey); + wr->wr.ud.pkey_index, &pkey); + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? @@ -1742,7 +1584,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, out: if (likely(nreq)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | f0 | op0); @@ -1843,7 +1685,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, out: if (likely(nreq)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); @@ -2064,7 +1906,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, out: if (likely(nreq)) { - u32 doorbell[2]; + __be32 doorbell[2]; doorbell[0] = cpu_to_be32((nreq << 24) | ((qp->sq.head & 0xffff) << 8) | @@ -2174,19 +2016,25 @@ out: } int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, - int index, int *dbd, u32 *new_wqe) + int index, int *dbd, __be32 *new_wqe) { struct mthca_next_seg *next; + /* + * For SRQs, all WQEs generate a CQE, so we're always at the + * end of the doorbell chain. + */ + if (qp->ibqp.srq) { + *new_wqe = 0; + return 0; + } + if (is_send) next = get_send_wqe(qp, index); else next = get_recv_wqe(qp, index); - if (mthca_is_memfree(dev)) - *dbd = 1; - else - *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); + *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); if (next->ee_nds & cpu_to_be32(0x3f)) *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | (next->ee_nds & cpu_to_be32(0x3f)); diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c new file mode 100644 index 000000000000..75cd2d84ef12 --- /dev/null +++ b/drivers/infiniband/hw/mthca/mthca_srq.c @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" +#include "mthca_wqe.h" + +enum { + MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE +}; + +struct mthca_tavor_srq_context { + __be64 wqe_base_ds; /* low 6 bits is descriptor size */ + __be32 state_pd; + __be32 lkey; + __be32 uar; + __be32 wqe_cnt; + u32 reserved[2]; +}; + +struct mthca_arbel_srq_context { + __be32 state_logsize_srqn; + __be32 lkey; + __be32 db_index; + __be32 logstride_usrpage; + __be64 wqe_base; + __be32 eq_pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved1; + __be16 wqe_counter; + u32 reserved2[3]; +}; + +static void *get_wqe(struct mthca_srq *srq, int n) +{ + if (srq->is_direct) + return srq->queue.direct.buf + (n << srq->wqe_shift); + else + return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + + ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); +} + +/* + * Return a pointer to the location within a WQE that we're using as a + * link when the WQE is in the free list. We use an offset of 4 + * because in the Tavor case, posting a WQE may overwrite the first + * four bytes of the previous WQE. The offset avoids corrupting our + * free list if the WQE has already completed and been put on the free + * list when we post the next WQE. 
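+ *
+ * As a sketch of how the link is used elsewhere in this file: posting
+ * a receive pops the head of the free list,
+ *
+ *	wqe = get_wqe(srq, srq->first_free);
+ *	srq->first_free = *wqe_to_link(wqe);
+ *
+ * and mthca_free_srq_wqe() pushes a completed WQE back by storing its
+ * index through wqe_to_link() of the previous last_free entry, with
+ * -1 marking the end of the list.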
+ */ +static inline int *wqe_to_link(void *wqe) +{ + return (int *) (wqe + 4); +} + +static void mthca_tavor_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_tavor_srq_context *context) +{ + memset(context, 0, sizeof *context); + + context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); + context->state_pd = cpu_to_be32(pd->pd_num); + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); + + if (pd->ibpd.uobject) + context->uar = + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); + else + context->uar = cpu_to_be32(dev->driver_uar.index); +} + +static void mthca_arbel_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_arbel_srq_context *context) +{ + int logsize; + + memset(context, 0, sizeof *context); + + logsize = long_log2(srq->max) + srq->wqe_shift; + context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); + context->db_index = cpu_to_be32(srq->db_index); + context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); + if (pd->ibpd.uobject) + context->logstride_usrpage |= + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); + else + context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index); + context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); +} + +static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) +{ + mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, + srq->is_direct, &srq->mr); + kfree(srq->wrid); +} + +static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, + struct mthca_srq *srq) +{ + struct mthca_data_seg *scatter; + void *wqe; + int err; + int i; + + if (pd->ibpd.uobject) + return 0; + + srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); + if (!srq->wrid) + return -ENOMEM; + + err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, + MTHCA_MAX_DIRECT_SRQ_SIZE, + &srq->queue, &srq->is_direct, pd, 1, &srq->mr); + if (err) { + kfree(srq->wrid); + return err; + } + + /* + * Now initialize the SRQ buffer so that all of the WQEs are + * linked into the list of free WQEs. In addition, set the + * scatter list L_Keys to the sentry value of 0x100. + */ + for (i = 0; i < srq->max; ++i) { + wqe = get_wqe(srq, i); + + *wqe_to_link(wqe) = i < srq->max - 1 ? 
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+		    struct ib_srq_attr *attr, struct mthca_srq *srq)
+{
+	struct mthca_mailbox *mailbox;
+	u8 status;
+	int ds;
+	int err;
+
+	/* Sanity check SRQ size before proceeding */
+	if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
+		return -EINVAL;
+
+	srq->max     = attr->max_wr;
+	srq->max_gs  = attr->max_sge;
+	srq->last    = NULL;
+	srq->counter = 0;
+
+	if (mthca_is_memfree(dev))
+		srq->max = roundup_pow_of_two(srq->max + 1);
+
+	ds = min(64UL,
+		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
+				    srq->max_gs * sizeof (struct mthca_data_seg)));
+	srq->wqe_shift = long_log2(ds);
+
+	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
+	if (srq->srqn == -1)
+		return -ENOMEM;
+
+	if (mthca_is_memfree(dev)) {
+		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
+		if (err)
+			goto err_out;
+
+		if (!pd->ibpd.uobject) {
+			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
+						       srq->srqn, &srq->db);
+			if (srq->db_index < 0) {
+				err = -ENOMEM;
+				goto err_out_icm;
+			}
+		}
+	}
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox)) {
+		err = PTR_ERR(mailbox);
+		goto err_out_db;
+	}
+
+	err = mthca_alloc_srq_buf(dev, pd, srq);
+	if (err)
+		goto err_out_mailbox;
+
+	spin_lock_init(&srq->lock);
+	atomic_set(&srq->refcount, 1);
+	init_waitqueue_head(&srq->wait);
+
+	if (mthca_is_memfree(dev))
+		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+	else
+		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+
+	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+
+	if (err) {
+		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
+		goto err_out_free_buf;
+	}
+	if (status) {
+		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
+			   status);
+		err = -EINVAL;
+		goto err_out_free_buf;
+	}
+
+	spin_lock_irq(&dev->srq_table.lock);
+	if (mthca_array_set(&dev->srq_table.srq,
+			    srq->srqn & (dev->limits.num_srqs - 1),
+			    srq)) {
+		spin_unlock_irq(&dev->srq_table.lock);
+		goto err_out_free_srq;
+	}
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	mthca_free_mailbox(dev, mailbox);
+
+	srq->first_free = 0;
+	srq->last_free  = srq->max - 1;
+
+	return 0;
+
+err_out_free_srq:
+	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+	if (err)
+		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+	else if (status)
+		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+err_out_free_buf:
+	if (!pd->ibpd.uobject)
+		mthca_free_srq_buf(dev, srq);
+
+err_out_mailbox:
+	mthca_free_mailbox(dev, mailbox);
+
+err_out_db:
+	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+
+err_out_icm:
+	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+
+err_out:
+	mthca_free(&dev->srq_table.alloc, srq->srqn);
+
+	return err;
+}
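[Editor's note: the descriptor-size computation above picks a power-of-two stride covering one mthca_next_seg plus max_gs scatter entries, clamped by the min(64UL, ...) expression, and wqe_shift is its base-2 log. A standalone sketch of the same arithmetic; the helper names are hypothetical stand-ins for the kernel's roundup_pow_of_two() and long_log2():

	#include <stdio.h>

	/* Both segment layouts in mthca_wqe.h are 16 bytes. */
	#define NEXT_SEG_SIZE 16
	#define DATA_SEG_SIZE 16

	static unsigned long roundup_pow_of_two(unsigned long n)
	{
		unsigned long p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	static int long_log2(unsigned long n)
	{
		int s = -1;

		while (n) {
			n >>= 1;
			++s;
		}
		return s;
	}

	int main(void)
	{
		int max_gs;

		for (max_gs = 1; max_gs <= 4; ++max_gs) {
			unsigned long ds =
				roundup_pow_of_two(NEXT_SEG_SIZE +
						   max_gs * DATA_SEG_SIZE);
			if (ds > 64)	/* mirrors the patch's min(64UL, ...) */
				ds = 64;
			printf("max_gs=%d -> stride %lu bytes (wqe_shift %d)\n",
			       max_gs, ds, long_log2(ds));
		}
		return 0;
	}

For example, max_gs = 2 needs 16 + 32 = 48 bytes, which rounds up to a 64-byte stride and wqe_shift = 6.]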
+
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+	struct mthca_mailbox *mailbox;
+	int err;
+	u8 status;
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox)) {
+		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
+		return;
+	}
+
+	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+	if (err)
+		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+	else if (status)
+		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+	spin_lock_irq(&dev->srq_table.lock);
+	mthca_array_clear(&dev->srq_table.srq,
+			  srq->srqn & (dev->limits.num_srqs - 1));
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	atomic_dec(&srq->refcount);
+	wait_event(srq->wait, !atomic_read(&srq->refcount));
+
+	if (!srq->ibsrq.uobject) {
+		mthca_free_srq_buf(dev, srq);
+		if (mthca_is_memfree(dev))
+			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+	}
+
+	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+	mthca_free(&dev->srq_table.alloc, srq->srqn);
+	mthca_free_mailbox(dev, mailbox);
+}
+
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+		     enum ib_event_type event_type)
+{
+	struct mthca_srq *srq;
+	struct ib_event event;
+
+	spin_lock(&dev->srq_table.lock);
+	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
+	if (srq)
+		atomic_inc(&srq->refcount);
+	spin_unlock(&dev->srq_table.lock);
+
+	if (!srq) {
+		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
+		return;
+	}
+
+	if (!srq->ibsrq.event_handler)
+		goto out;
+
+	event.device      = &dev->ib_dev;
+	event.event       = event_type;
+	event.element.srq = &srq->ibsrq;
+	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+
+out:
+	if (atomic_dec_and_test(&srq->refcount))
+		wake_up(&srq->wait);
+}
+
+/*
+ * This function must be called with IRQs disabled.
+ */
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
+{
+	int ind;
+
+	ind = wqe_addr >> srq->wqe_shift;
+
+	spin_lock(&srq->lock);
+
+	if (likely(srq->first_free >= 0))
+		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+	else
+		srq->first_free = ind;
+
+	*wqe_to_link(get_wqe(srq, ind)) = -1;
+	srq->last_free = ind;
+
+	spin_unlock(&srq->lock);
+}
+
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			      struct ib_recv_wr **bad_wr)
+{
+	struct mthca_dev *dev = to_mdev(ibsrq->device);
+	struct mthca_srq *srq = to_msrq(ibsrq);
+	unsigned long flags;
+	int err = 0;
+	int first_ind;
+	int ind;
+	int next_ind;
+	int nreq;
+	int i;
+	void *wqe;
+	void *prev_wqe;
+
+	spin_lock_irqsave(&srq->lock, flags);
+
+	first_ind = srq->first_free;
+
+	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		ind = srq->first_free;
+
+		if (ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
+		wqe       = get_wqe(srq, ind);
+		next_ind  = *wqe_to_link(wqe);
+		prev_wqe  = srq->last;
+		srq->last = wqe;
+
+		((struct mthca_next_seg *) wqe)->nda_op = 0;
+		((struct mthca_next_seg *) wqe)->ee_nds = 0;
+		/* flags field will always remain 0 */
+
+		wqe += sizeof (struct mthca_next_seg);
+
+		if (unlikely(wr->num_sge > srq->max_gs)) {
+			err = -EINVAL;
+			*bad_wr = wr;
+			srq->last = prev_wqe;
+			break;
+		}
+
+		for (i = 0; i < wr->num_sge; ++i) {
+			((struct mthca_data_seg *) wqe)->byte_count =
+				cpu_to_be32(wr->sg_list[i].length);
+			((struct mthca_data_seg *) wqe)->lkey =
+				cpu_to_be32(wr->sg_list[i].lkey);
+			((struct mthca_data_seg *) wqe)->addr =
+				cpu_to_be64(wr->sg_list[i].addr);
+			wqe += sizeof (struct mthca_data_seg);
+		}
+
+		if (i < srq->max_gs) {
+			((struct mthca_data_seg *) wqe)->byte_count = 0;
+			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+			((struct mthca_data_seg *) wqe)->addr = 0;
+		}
+
+		if (likely(prev_wqe)) {
+			((struct mthca_next_seg *) prev_wqe)->nda_op =
+				cpu_to_be32((ind << srq->wqe_shift) | 1);
+			wmb();
+			((struct mthca_next_seg *) prev_wqe)->ee_nds =
+				cpu_to_be32(MTHCA_NEXT_DBD);
+		}
+
+		srq->wrid[ind]  = wr->wr_id;
+		srq->first_free = next_ind;
+	}
+
+	if (likely(nreq)) {
+		__be32 doorbell[2];
+
+		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
+		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
+
+		/*
+		 * Make sure that descriptors are written before
+		 * doorbell is rung.
+		 */
+		wmb();
+
+		mthca_write64(doorbell,
+			      dev->kar + MTHCA_RECEIVE_DOORBELL,
+			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+	}
+
+	spin_unlock_irqrestore(&srq->lock, flags);
+	return err;
+}
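[Editor's note: these driver entry points are reached from consumers through the ib_post_srq_recv() verb added by the same SRQ patch series. A minimal consumer-side sketch, assuming the caller has already created the SRQ and registered its memory; post_one_recv, buf_dma and mr_lkey are hypothetical names, and error handling is elided:

	#include <rdma/ib_verbs.h>

	/* Post a single 4 KB receive buffer to a shared receive queue. */
	static int post_one_recv(struct ib_srq *srq, u64 buf_dma, u32 mr_lkey)
	{
		struct ib_sge sge = {
			.addr   = buf_dma,
			.length = 4096,
			.lkey   = mr_lkey,
		};
		struct ib_recv_wr wr = {
			.wr_id   = buf_dma,	/* cookie returned in the CQE */
			.sg_list = &sge,
			.num_sge = 1,
		};
		struct ib_recv_wr *bad_wr;

		return ib_post_srq_recv(srq, &wr, &bad_wr);
	}

On failure, bad_wr points at the first work request that could not be posted, matching how the functions above set *bad_wr before breaking out of the posting loop.]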
+
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			      struct ib_recv_wr **bad_wr)
+{
+	struct mthca_dev *dev = to_mdev(ibsrq->device);
+	struct mthca_srq *srq = to_msrq(ibsrq);
+	unsigned long flags;
+	int err = 0;
+	int ind;
+	int next_ind;
+	int nreq;
+	int i;
+	void *wqe;
+
+	spin_lock_irqsave(&srq->lock, flags);
+
+	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		ind = srq->first_free;
+
+		if (ind < 0) {
+			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+
+		wqe      = get_wqe(srq, ind);
+		next_ind = *wqe_to_link(wqe);
+
+		((struct mthca_next_seg *) wqe)->nda_op =
+			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
+		((struct mthca_next_seg *) wqe)->ee_nds = 0;
+		/* flags field will always remain 0 */
+
+		wqe += sizeof (struct mthca_next_seg);
+
+		if (unlikely(wr->num_sge > srq->max_gs)) {
+			err = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
+
+		for (i = 0; i < wr->num_sge; ++i) {
+			((struct mthca_data_seg *) wqe)->byte_count =
+				cpu_to_be32(wr->sg_list[i].length);
+			((struct mthca_data_seg *) wqe)->lkey =
+				cpu_to_be32(wr->sg_list[i].lkey);
+			((struct mthca_data_seg *) wqe)->addr =
+				cpu_to_be64(wr->sg_list[i].addr);
+			wqe += sizeof (struct mthca_data_seg);
+		}
+
+		if (i < srq->max_gs) {
+			((struct mthca_data_seg *) wqe)->byte_count = 0;
+			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+			((struct mthca_data_seg *) wqe)->addr = 0;
+		}
+
+		srq->wrid[ind]  = wr->wr_id;
+		srq->first_free = next_ind;
+	}
+
+	if (likely(nreq)) {
+		srq->counter += nreq;
+
+		/*
+		 * Make sure that descriptors are written before
+		 * we write doorbell record.
+		 */
+		wmb();
+		*srq->db = cpu_to_be32(srq->counter);
+	}
+
+	spin_unlock_irqrestore(&srq->lock, flags);
+	return err;
+}
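[Editor's note: the two posting paths differ mainly in how the HCA learns about new work. Tavor rings a doorbell register with an MMIO write via mthca_write64(); mem-free (Arbel) hardware instead reads a doorbell record in coherent memory, so only *srq->db is updated. Both follow the same publish-then-ring rule, sketched schematically below; the names are hypothetical and wmb() stands for the kernel's write memory barrier:

	/* Descriptors must be globally visible before the doorbell
	 * value, or the HCA could fetch a stale WQE. */
	static void publish_work(u32 *desc, u32 desc_val,
				 volatile u32 *doorbell, u32 db_val)
	{
		*desc = desc_val;	/* 1. fill in descriptor memory  */
		wmb();			/* 2. order descriptor vs. bell  */
		*doorbell = db_val;	/* 3. hand the work to hardware  */
	}

Dropping the barrier is a classic source of rare, hard-to-reproduce corruption, which is why both functions place a wmb() immediately before the doorbell write.]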
+
+int __devinit mthca_init_srq_table(struct mthca_dev *dev)
+{
+	int err;
+
+	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+		return 0;
+
+	spin_lock_init(&dev->srq_table.lock);
+
+	err = mthca_alloc_init(&dev->srq_table.alloc,
+			       dev->limits.num_srqs,
+			       dev->limits.num_srqs - 1,
+			       dev->limits.reserved_srqs);
+	if (err)
+		return err;
+
+	err = mthca_array_init(&dev->srq_table.srq,
+			       dev->limits.num_srqs);
+	if (err)
+		mthca_alloc_cleanup(&dev->srq_table.alloc);
+
+	return err;
+}
+
+void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
+{
+	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+		return;
+
+	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
+	mthca_alloc_cleanup(&dev->srq_table.alloc);
+}
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 3024c1b4547d..41613ec8a04e 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -69,6 +69,17 @@ struct mthca_create_cq_resp {
 	__u32 reserved;
 };
 
+struct mthca_create_srq {
+	__u32 lkey;
+	__u32 db_index;
+	__u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+	__u32 srqn;
+	__u32 reserved;
+};
+
 struct mthca_create_qp {
 	__u32 lkey;
 	__u32 reserved;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
new file mode 100644
index 000000000000..1f4c0ff28f79
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_WQE_H
+#define MTHCA_WQE_H
+
+#include <linux/types.h>
+
+enum {
+	MTHCA_NEXT_DBD       = 1 << 7,
+	MTHCA_NEXT_FENCE     = 1 << 6,
+	MTHCA_NEXT_CQ_UPDATE = 1 << 3,
+	MTHCA_NEXT_EVENT_GEN = 1 << 2,
+	MTHCA_NEXT_SOLICIT   = 1 << 1,
+
+	MTHCA_MLX_VL15       = 1 << 17,
+	MTHCA_MLX_SLR        = 1 << 16
+};
+
+enum {
+	MTHCA_INVAL_LKEY = 0x100
+};
+
+struct mthca_next_seg {
+	__be32 nda_op;		/* [31:6] next WQE [4:0] next opcode */
+	__be32 ee_nds;		/* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
+	__be32 flags;		/* [3] CQ [2] Event [1] Solicit */
+	__be32 imm;		/* immediate data */
+};
+
+struct mthca_tavor_ud_seg {
+	u32    reserved1;
+	__be32 lkey;
+	__be64 av_addr;
+	u32    reserved2[4];
+	__be32 dqpn;
+	__be32 qkey;
+	u32    reserved3[2];
+};
+
+struct mthca_arbel_ud_seg {
+	__be32 av[8];
+	__be32 dqpn;
+	__be32 qkey;
+	u32    reserved[2];
+};
+
+struct mthca_bind_seg {
+	__be32 flags;		/* [31] Atomic [30] rem write [29] rem read */
+	u32    reserved;
+	__be32 new_rkey;
+	__be32 lkey;
+	__be64 addr;
+	__be64 length;
+};
+
+struct mthca_raddr_seg {
+	__be64 raddr;
+	__be32 rkey;
+	u32    reserved;
+};
+
+struct mthca_atomic_seg {
+	__be64 swap_add;
+	__be64 compare;
+};
+
+struct mthca_data_seg {
+	__be32 byte_count;
+	__be32 lkey;
+	__be64 addr;
+};
+
+struct mthca_mlx_seg {
+	__be32 nda_op;
+	__be32 nds;
+	__be32 flags;		/* [17] VL15 [16] SLR [14:12] static rate
+				   [11:8] SL [3] C [2] E */
+	__be16 rlid;
+	__be16 vcrc;
+};
+
+#endif /* MTHCA_WQE_H */
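[Editor's note: the SRQ stride arithmetic in mthca_srq.c assumes mthca_next_seg and mthca_data_seg are both 16 bytes, which the layouts above satisfy (four __be32 fields, and __be32 + __be32 + __be64 respectively). A compile-time check of that assumption could look like the following; it is illustrative only and not part of the patch:

	#include <linux/kernel.h>
	#include "mthca_wqe.h"

	/* BUILD_BUG_ON() fails the build if these layouts ever drift
	 * from the 16-byte sizes the WQE stride math depends on. */
	static inline void mthca_wqe_layout_check(void)
	{
		BUILD_BUG_ON(sizeof (struct mthca_next_seg) != 16);
		BUILD_BUG_ON(sizeof (struct mthca_data_seg) != 16);
	}

Because these structs mirror what the HCA reads on the wire, they use the fixed-width __be types that the sparse-annotation work in this same patch set introduces throughout the stack.]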
diff --git a/drivers/infiniband/include/ib_cache.h b/drivers/infiniband/include/ib_cache.h
deleted file mode 100644
index 44ef6bb9b9df..000000000000
--- a/drivers/infiniband/include/ib_cache.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: ib_cache.h 1349 2004-12-16 21:09:43Z roland $
- */
-
-#ifndef _IB_CACHE_H
-#define _IB_CACHE_H
-
-#include <ib_verbs.h>
-
-/**
- * ib_get_cached_gid - Returns a cached GID table entry
- * @device: The device to query.
- * @port_num: The port number of the device to query.
- * @index: The index into the cached GID table to query.
- * @gid: The GID value found at the specified index.
- *
- * ib_get_cached_gid() fetches the specified GID table entry stored in
- * the local software cache.
- */
-int ib_get_cached_gid(struct ib_device *device,
-		      u8 port_num,
-		      int index,
-		      union ib_gid *gid);
-
-/**
- * ib_find_cached_gid - Returns the port number and GID table index where
- *   a specified GID value occurs.
- * @device: The device to query.
- * @gid: The GID value to search for.
- * @port_num: The port number of the device where the GID value was found.
- * @index: The index into the cached GID table where the GID was found. This
- *   parameter may be NULL.
- *
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
- */
-int ib_find_cached_gid(struct ib_device *device,
-		       union ib_gid *gid,
-		       u8 *port_num,
-		       u16 *index);
-
-/**
- * ib_get_cached_pkey - Returns a cached PKey table entry
- * @device: The device to query.
- * @port_num: The port number of the device to query.
- * @index: The index into the cached PKey table to query.
- * @pkey: The PKey value found at the specified index.
- *
- * ib_get_cached_pkey() fetches the specified PKey table entry stored in
- * the local software cache.
- */
-int ib_get_cached_pkey(struct ib_device *device_handle,
-		       u8 port_num,
-		       int index,
-		       u16 *pkey);
-
-/**
- * ib_find_cached_pkey - Returns the PKey table index where a specified
- *   PKey value occurs.
- * @device: The device to query.
- * @port_num: The port number of the device to search for the PKey.
- * @pkey: The PKey value to search for.
- * @index: The index into the cached PKey table where the PKey was found.
- *
- * ib_find_cached_pkey() searches the specified PKey table in
- * the local software cache.
- */
-int ib_find_cached_pkey(struct ib_device *device,
-			u8 port_num,
-			u16 pkey,
-			u16 *index);
-
-#endif /* _IB_CACHE_H */
diff --git a/drivers/infiniband/include/ib_cm.h b/drivers/infiniband/include/ib_cm.h
deleted file mode 100644
index da650115e79a..000000000000
--- a/drivers/infiniband/include/ib_cm.h
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id: ib_cm.h 2730 2005-06-28 16:43:03Z sean.hefty $ - */ -#if !defined(IB_CM_H) -#define IB_CM_H - -#include <ib_mad.h> -#include <ib_sa.h> - -enum ib_cm_state { - IB_CM_IDLE, - IB_CM_LISTEN, - IB_CM_REQ_SENT, - IB_CM_REQ_RCVD, - IB_CM_MRA_REQ_SENT, - IB_CM_MRA_REQ_RCVD, - IB_CM_REP_SENT, - IB_CM_REP_RCVD, - IB_CM_MRA_REP_SENT, - IB_CM_MRA_REP_RCVD, - IB_CM_ESTABLISHED, - IB_CM_DREQ_SENT, - IB_CM_DREQ_RCVD, - IB_CM_TIMEWAIT, - IB_CM_SIDR_REQ_SENT, - IB_CM_SIDR_REQ_RCVD -}; - -enum ib_cm_lap_state { - IB_CM_LAP_IDLE, - IB_CM_LAP_SENT, - IB_CM_LAP_RCVD, - IB_CM_MRA_LAP_SENT, - IB_CM_MRA_LAP_RCVD, -}; - -enum ib_cm_event_type { - IB_CM_REQ_ERROR, - IB_CM_REQ_RECEIVED, - IB_CM_REP_ERROR, - IB_CM_REP_RECEIVED, - IB_CM_RTU_RECEIVED, - IB_CM_USER_ESTABLISHED, - IB_CM_DREQ_ERROR, - IB_CM_DREQ_RECEIVED, - IB_CM_DREP_RECEIVED, - IB_CM_TIMEWAIT_EXIT, - IB_CM_MRA_RECEIVED, - IB_CM_REJ_RECEIVED, - IB_CM_LAP_ERROR, - IB_CM_LAP_RECEIVED, - IB_CM_APR_RECEIVED, - IB_CM_SIDR_REQ_ERROR, - IB_CM_SIDR_REQ_RECEIVED, - IB_CM_SIDR_REP_RECEIVED -}; - -enum ib_cm_data_size { - IB_CM_REQ_PRIVATE_DATA_SIZE = 92, - IB_CM_MRA_PRIVATE_DATA_SIZE = 222, - IB_CM_REJ_PRIVATE_DATA_SIZE = 148, - IB_CM_REP_PRIVATE_DATA_SIZE = 196, - IB_CM_RTU_PRIVATE_DATA_SIZE = 224, - IB_CM_DREQ_PRIVATE_DATA_SIZE = 220, - IB_CM_DREP_PRIVATE_DATA_SIZE = 224, - IB_CM_REJ_ARI_LENGTH = 72, - IB_CM_LAP_PRIVATE_DATA_SIZE = 168, - IB_CM_APR_PRIVATE_DATA_SIZE = 148, - IB_CM_APR_INFO_LENGTH = 72, - IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, - IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, - IB_CM_SIDR_REP_INFO_LENGTH = 72 -}; - -struct ib_cm_id; - -struct ib_cm_req_event_param { - struct ib_cm_id *listen_id; - struct ib_device *device; - u8 port; - - struct ib_sa_path_rec *primary_path; - struct ib_sa_path_rec *alternate_path; - - u64 remote_ca_guid; - u32 remote_qkey; - u32 remote_qpn; - enum ib_qp_type qp_type; - - u32 starting_psn; - u8 responder_resources; - u8 initiator_depth; - unsigned int local_cm_response_timeout:5; - unsigned int flow_control:1; - unsigned int remote_cm_response_timeout:5; - unsigned int retry_count:3; - unsigned int rnr_retry_count:3; - unsigned int srq:1; -}; - -struct ib_cm_rep_event_param { - u64 remote_ca_guid; - u32 remote_qkey; - u32 remote_qpn; - u32 starting_psn; - u8 responder_resources; - u8 initiator_depth; - unsigned int target_ack_delay:5; - unsigned int failover_accepted:2; - unsigned int flow_control:1; - unsigned int rnr_retry_count:3; - unsigned int srq:1; -}; - -enum ib_cm_rej_reason { - IB_CM_REJ_NO_QP = __constant_htons(1), - IB_CM_REJ_NO_EEC = __constant_htons(2), - IB_CM_REJ_NO_RESOURCES = __constant_htons(3), - IB_CM_REJ_TIMEOUT = __constant_htons(4), - IB_CM_REJ_UNSUPPORTED = __constant_htons(5), - IB_CM_REJ_INVALID_COMM_ID = __constant_htons(6), - IB_CM_REJ_INVALID_COMM_INSTANCE = __constant_htons(7), - IB_CM_REJ_INVALID_SERVICE_ID = __constant_htons(8), - IB_CM_REJ_INVALID_TRANSPORT_TYPE = __constant_htons(9), - IB_CM_REJ_STALE_CONN = __constant_htons(10), - IB_CM_REJ_RDC_NOT_EXIST = __constant_htons(11), - IB_CM_REJ_INVALID_GID = __constant_htons(12), - IB_CM_REJ_INVALID_LID = __constant_htons(13), - IB_CM_REJ_INVALID_SL = __constant_htons(14), - IB_CM_REJ_INVALID_TRAFFIC_CLASS = __constant_htons(15), - 
IB_CM_REJ_INVALID_HOP_LIMIT = __constant_htons(16), - IB_CM_REJ_INVALID_PACKET_RATE = __constant_htons(17), - IB_CM_REJ_INVALID_ALT_GID = __constant_htons(18), - IB_CM_REJ_INVALID_ALT_LID = __constant_htons(19), - IB_CM_REJ_INVALID_ALT_SL = __constant_htons(20), - IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21), - IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22), - IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23), - IB_CM_REJ_PORT_CM_REDIRECT = __constant_htons(24), - IB_CM_REJ_PORT_REDIRECT = __constant_htons(25), - IB_CM_REJ_INVALID_MTU = __constant_htons(26), - IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27), - IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28), - IB_CM_REJ_INVALID_RNR_RETRY = __constant_htons(29), - IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = __constant_htons(30), - IB_CM_REJ_INVALID_CLASS_VERSION = __constant_htons(31), - IB_CM_REJ_INVALID_FLOW_LABEL = __constant_htons(32), - IB_CM_REJ_INVALID_ALT_FLOW_LABEL = __constant_htons(33) -}; - -struct ib_cm_rej_event_param { - enum ib_cm_rej_reason reason; - void *ari; - u8 ari_length; -}; - -struct ib_cm_mra_event_param { - u8 service_timeout; -}; - -struct ib_cm_lap_event_param { - struct ib_sa_path_rec *alternate_path; -}; - -enum ib_cm_apr_status { - IB_CM_APR_SUCCESS, - IB_CM_APR_INVALID_COMM_ID, - IB_CM_APR_UNSUPPORTED, - IB_CM_APR_REJECT, - IB_CM_APR_REDIRECT, - IB_CM_APR_IS_CURRENT, - IB_CM_APR_INVALID_QPN_EECN, - IB_CM_APR_INVALID_LID, - IB_CM_APR_INVALID_GID, - IB_CM_APR_INVALID_FLOW_LABEL, - IB_CM_APR_INVALID_TCLASS, - IB_CM_APR_INVALID_HOP_LIMIT, - IB_CM_APR_INVALID_PACKET_RATE, - IB_CM_APR_INVALID_SL -}; - -struct ib_cm_apr_event_param { - enum ib_cm_apr_status ap_status; - void *apr_info; - u8 info_len; -}; - -struct ib_cm_sidr_req_event_param { - struct ib_cm_id *listen_id; - struct ib_device *device; - u8 port; - - u16 pkey; -}; - -enum ib_cm_sidr_status { - IB_SIDR_SUCCESS, - IB_SIDR_UNSUPPORTED, - IB_SIDR_REJECT, - IB_SIDR_NO_QP, - IB_SIDR_REDIRECT, - IB_SIDR_UNSUPPORTED_VERSION -}; - -struct ib_cm_sidr_rep_event_param { - enum ib_cm_sidr_status status; - u32 qkey; - u32 qpn; - void *info; - u8 info_len; - -}; - -struct ib_cm_event { - enum ib_cm_event_type event; - union { - struct ib_cm_req_event_param req_rcvd; - struct ib_cm_rep_event_param rep_rcvd; - /* No data for RTU received events. */ - struct ib_cm_rej_event_param rej_rcvd; - struct ib_cm_mra_event_param mra_rcvd; - struct ib_cm_lap_event_param lap_rcvd; - struct ib_cm_apr_event_param apr_rcvd; - /* No data for DREQ/DREP received events. */ - struct ib_cm_sidr_req_event_param sidr_req_rcvd; - struct ib_cm_sidr_rep_event_param sidr_rep_rcvd; - enum ib_wc_status send_status; - } param; - - void *private_data; -}; - -/** - * ib_cm_handler - User-defined callback to process communication events. - * @cm_id: Communication identifier associated with the reported event. - * @event: Information about the communication event. - * - * IB_CM_REQ_RECEIVED and IB_CM_SIDR_REQ_RECEIVED communication events - * generated as a result of listen requests result in the allocation of a - * new @cm_id. The new @cm_id is returned to the user through this callback. - * Clients are responsible for destroying the new @cm_id. For peer-to-peer - * IB_CM_REQ_RECEIVED and all other events, the returned @cm_id corresponds - * to a user's existing communication identifier. 
- * - * Users may not call ib_destroy_cm_id while in the context of this callback; - * however, returning a non-zero value instructs the communication manager to - * destroy the @cm_id after the callback completes. - */ -typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id, - struct ib_cm_event *event); - -struct ib_cm_id { - ib_cm_handler cm_handler; - void *context; - u64 service_id; - u64 service_mask; - enum ib_cm_state state; /* internal CM/debug use */ - enum ib_cm_lap_state lap_state; /* internal CM/debug use */ - u32 local_id; - u32 remote_id; -}; - -/** - * ib_create_cm_id - Allocate a communication identifier. - * @cm_handler: Callback invoked to notify the user of CM events. - * @context: User specified context associated with the communication - * identifier. - * - * Communication identifiers are used to track connection states, service - * ID resolution requests, and listen requests. - */ -struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler, - void *context); - -/** - * ib_destroy_cm_id - Destroy a connection identifier. - * @cm_id: Connection identifier to destroy. - * - * This call blocks until the connection identifier is destroyed. - */ -void ib_destroy_cm_id(struct ib_cm_id *cm_id); - -#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL) -#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL) - -/** - * ib_cm_listen - Initiates listening on the specified service ID for - * connection and service ID resolution requests. - * @cm_id: Connection identifier associated with the listen request. - * @service_id: Service identifier matched against incoming connection - * and service ID resolution requests. The service ID should be specified - * network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will - * assign a service ID to the caller. - * @service_mask: Mask applied to service ID used to listen across a - * range of service IDs. If set to 0, the service ID is matched - * exactly. This parameter is ignored if %service_id is set to - * IB_CM_ASSIGN_SERVICE_ID. - */ -int ib_cm_listen(struct ib_cm_id *cm_id, - u64 service_id, - u64 service_mask); - -struct ib_cm_req_param { - struct ib_sa_path_rec *primary_path; - struct ib_sa_path_rec *alternate_path; - u64 service_id; - u32 qp_num; - enum ib_qp_type qp_type; - u32 starting_psn; - const void *private_data; - u8 private_data_len; - u8 peer_to_peer; - u8 responder_resources; - u8 initiator_depth; - u8 remote_cm_response_timeout; - u8 flow_control; - u8 local_cm_response_timeout; - u8 retry_count; - u8 rnr_retry_count; - u8 max_cm_retries; - u8 srq; -}; - -/** - * ib_send_cm_req - Sends a connection request to the remote node. - * @cm_id: Connection identifier that will be associated with the - * connection request. - * @param: Connection request information needed to establish the - * connection. - */ -int ib_send_cm_req(struct ib_cm_id *cm_id, - struct ib_cm_req_param *param); - -struct ib_cm_rep_param { - u32 qp_num; - u32 starting_psn; - const void *private_data; - u8 private_data_len; - u8 responder_resources; - u8 initiator_depth; - u8 target_ack_delay; - u8 failover_accepted; - u8 flow_control; - u8 rnr_retry_count; - u8 srq; -}; - -/** - * ib_send_cm_rep - Sends a connection reply in response to a connection - * request. - * @cm_id: Connection identifier that will be associated with the - * connection request. - * @param: Connection reply information needed to establish the - * connection. 
- */ -int ib_send_cm_rep(struct ib_cm_id *cm_id, - struct ib_cm_rep_param *param); - -/** - * ib_send_cm_rtu - Sends a connection ready to use message in response - * to a connection reply message. - * @cm_id: Connection identifier associated with the connection request. - * @private_data: Optional user-defined private data sent with the - * ready to use message. - * @private_data_len: Size of the private data buffer, in bytes. - */ -int ib_send_cm_rtu(struct ib_cm_id *cm_id, - const void *private_data, - u8 private_data_len); - -/** - * ib_send_cm_dreq - Sends a disconnection request for an existing - * connection. - * @cm_id: Connection identifier associated with the connection being - * released. - * @private_data: Optional user-defined private data sent with the - * disconnection request message. - * @private_data_len: Size of the private data buffer, in bytes. - */ -int ib_send_cm_dreq(struct ib_cm_id *cm_id, - const void *private_data, - u8 private_data_len); - -/** - * ib_send_cm_drep - Sends a disconnection reply to a disconnection request. - * @cm_id: Connection identifier associated with the connection being - * released. - * @private_data: Optional user-defined private data sent with the - * disconnection reply message. - * @private_data_len: Size of the private data buffer, in bytes. - * - * If the cm_id is in the correct state, the CM will transition the connection - * to the timewait state, even if an error occurs sending the DREP message. - */ -int ib_send_cm_drep(struct ib_cm_id *cm_id, - const void *private_data, - u8 private_data_len); - -/** - * ib_cm_establish - Forces a connection state to established. - * @cm_id: Connection identifier to transition to established. - * - * This routine should be invoked by users who receive messages on a - * connected QP before an RTU has been received. - */ -int ib_cm_establish(struct ib_cm_id *cm_id); - -/** - * ib_send_cm_rej - Sends a connection rejection message to the - * remote node. - * @cm_id: Connection identifier associated with the connection being - * rejected. - * @reason: Reason for the connection request rejection. - * @ari: Optional additional rejection information. - * @ari_length: Size of the additional rejection information, in bytes. - * @private_data: Optional user-defined private data sent with the - * rejection message. - * @private_data_len: Size of the private data buffer, in bytes. - */ -int ib_send_cm_rej(struct ib_cm_id *cm_id, - enum ib_cm_rej_reason reason, - void *ari, - u8 ari_length, - const void *private_data, - u8 private_data_len); - -/** - * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection - * message. - * @cm_id: Connection identifier associated with the connection message. - * @service_timeout: The maximum time required for the sender to reply to - * to the connection message. - * @private_data: Optional user-defined private data sent with the - * message receipt acknowledgement. - * @private_data_len: Size of the private data buffer, in bytes. - */ -int ib_send_cm_mra(struct ib_cm_id *cm_id, - u8 service_timeout, - const void *private_data, - u8 private_data_len); - -/** - * ib_send_cm_lap - Sends a load alternate path request. - * @cm_id: Connection identifier associated with the load alternate path - * message. - * @alternate_path: A path record that identifies the alternate path to - * load. - * @private_data: Optional user-defined private data sent with the - * load alternate path message. - * @private_data_len: Size of the private data buffer, in bytes. 
- */ -int ib_send_cm_lap(struct ib_cm_id *cm_id, - struct ib_sa_path_rec *alternate_path, - const void *private_data, - u8 private_data_len); - -/** - * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning - * to a specified QP state. - * @cm_id: Communication identifier associated with the QP attributes to - * initialize. - * @qp_attr: On input, specifies the desired QP state. On output, the - * mandatory and desired optional attributes will be set in order to - * modify the QP to the specified state. - * @qp_attr_mask: The QP attribute mask that may be used to transition the - * QP to the specified state. - * - * Users must set the @qp_attr->qp_state to the desired QP state. This call - * will set all required attributes for the given transition, along with - * known optional attributes. Users may override the attributes returned from - * this call before calling ib_modify_qp. - */ -int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, - struct ib_qp_attr *qp_attr, - int *qp_attr_mask); - -/** - * ib_send_cm_apr - Sends an alternate path response message in response to - * a load alternate path request. - * @cm_id: Connection identifier associated with the alternate path response. - * @status: Reply status sent with the alternate path response. - * @info: Optional additional information sent with the alternate path - * response. - * @info_length: Size of the additional information, in bytes. - * @private_data: Optional user-defined private data sent with the - * alternate path response message. - * @private_data_len: Size of the private data buffer, in bytes. - */ -int ib_send_cm_apr(struct ib_cm_id *cm_id, - enum ib_cm_apr_status status, - void *info, - u8 info_length, - const void *private_data, - u8 private_data_len); - -struct ib_cm_sidr_req_param { - struct ib_sa_path_rec *path; - u64 service_id; - int timeout_ms; - const void *private_data; - u8 private_data_len; - u8 max_cm_retries; - u16 pkey; -}; - -/** - * ib_send_cm_sidr_req - Sends a service ID resolution request to the - * remote node. - * @cm_id: Communication identifier that will be associated with the - * service ID resolution request. - * @param: Service ID resolution request information. - */ -int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, - struct ib_cm_sidr_req_param *param); - -struct ib_cm_sidr_rep_param { - u32 qp_num; - u32 qkey; - enum ib_cm_sidr_status status; - const void *info; - u8 info_length; - const void *private_data; - u8 private_data_len; -}; - -/** - * ib_send_cm_sidr_rep - Sends a service ID resolution request to the - * remote node. - * @cm_id: Communication identifier associated with the received service ID - * resolution request. - * @param: Service ID resolution reply information. - */ -int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, - struct ib_cm_sidr_rep_param *param); - -#endif /* IB_CM_H */ diff --git a/drivers/infiniband/include/ib_fmr_pool.h b/drivers/infiniband/include/ib_fmr_pool.h deleted file mode 100644 index 6c9e24d6e144..000000000000 --- a/drivers/infiniband/include/ib_fmr_pool.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id: ib_fmr_pool.h 2730 2005-06-28 16:43:03Z sean.hefty $ - */ - -#if !defined(IB_FMR_POOL_H) -#define IB_FMR_POOL_H - -#include <ib_verbs.h> - -struct ib_fmr_pool; - -/** - * struct ib_fmr_pool_param - Parameters for creating FMR pool - * @max_pages_per_fmr:Maximum number of pages per map request. - * @access:Access flags for FMRs in pool. - * @pool_size:Number of FMRs to allocate for pool. - * @dirty_watermark:Flush is triggered when @dirty_watermark dirty - * FMRs are present. - * @flush_function:Callback called when unmapped FMRs are flushed and - * more FMRs are possibly available for mapping - * @flush_arg:Context passed to user's flush function. - * @cache:If set, FMRs may be reused after unmapping for identical map - * requests. - */ -struct ib_fmr_pool_param { - int max_pages_per_fmr; - enum ib_access_flags access; - int pool_size; - int dirty_watermark; - void (*flush_function)(struct ib_fmr_pool *pool, - void * arg); - void *flush_arg; - unsigned cache:1; -}; - -struct ib_pool_fmr { - struct ib_fmr *fmr; - struct ib_fmr_pool *pool; - struct list_head list; - struct hlist_node cache_node; - int ref_count; - int remap_count; - u64 io_virtual_address; - int page_list_len; - u64 page_list[0]; -}; - -struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, - struct ib_fmr_pool_param *params); - -void ib_destroy_fmr_pool(struct ib_fmr_pool *pool); - -int ib_flush_fmr_pool(struct ib_fmr_pool *pool); - -struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, - u64 *page_list, - int list_len, - u64 *io_virtual_address); - -int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr); - -#endif /* IB_FMR_POOL_H */ diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h deleted file mode 100644 index 491b6f25b3b8..000000000000 --- a/drivers/infiniband/include/ib_mad.h +++ /dev/null @@ -1,577 +0,0 @@ -/* - * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $ - */ - -#if !defined( IB_MAD_H ) -#define IB_MAD_H - -#include <linux/pci.h> - -#include <ib_verbs.h> - -/* Management base version */ -#define IB_MGMT_BASE_VERSION 1 - -/* Management classes */ -#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01 -#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81 -#define IB_MGMT_CLASS_SUBN_ADM 0x03 -#define IB_MGMT_CLASS_PERF_MGMT 0x04 -#define IB_MGMT_CLASS_BM 0x05 -#define IB_MGMT_CLASS_DEVICE_MGMT 0x06 -#define IB_MGMT_CLASS_CM 0x07 -#define IB_MGMT_CLASS_SNMP 0x08 -#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30 -#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F - -#define IB_OPENIB_OUI (0x001405) - -/* Management methods */ -#define IB_MGMT_METHOD_GET 0x01 -#define IB_MGMT_METHOD_SET 0x02 -#define IB_MGMT_METHOD_GET_RESP 0x81 -#define IB_MGMT_METHOD_SEND 0x03 -#define IB_MGMT_METHOD_TRAP 0x05 -#define IB_MGMT_METHOD_REPORT 0x06 -#define IB_MGMT_METHOD_REPORT_RESP 0x86 -#define IB_MGMT_METHOD_TRAP_REPRESS 0x07 - -#define IB_MGMT_METHOD_RESP 0x80 - -#define IB_MGMT_MAX_METHODS 128 - -/* RMPP information */ -#define IB_MGMT_RMPP_VERSION 1 - -#define IB_MGMT_RMPP_TYPE_DATA 1 -#define IB_MGMT_RMPP_TYPE_ACK 2 -#define IB_MGMT_RMPP_TYPE_STOP 3 -#define IB_MGMT_RMPP_TYPE_ABORT 4 - -#define IB_MGMT_RMPP_FLAG_ACTIVE 1 -#define IB_MGMT_RMPP_FLAG_FIRST (1<<1) -#define IB_MGMT_RMPP_FLAG_LAST (1<<2) - -#define IB_MGMT_RMPP_NO_RESPTIME 0x1F - -#define IB_MGMT_RMPP_STATUS_SUCCESS 0 -#define IB_MGMT_RMPP_STATUS_RESX 1 -#define IB_MGMT_RMPP_STATUS_T2L 118 -#define IB_MGMT_RMPP_STATUS_BAD_LEN 119 -#define IB_MGMT_RMPP_STATUS_BAD_SEG 120 -#define IB_MGMT_RMPP_STATUS_BADT 121 -#define IB_MGMT_RMPP_STATUS_W2S 122 -#define IB_MGMT_RMPP_STATUS_S2B 123 -#define IB_MGMT_RMPP_STATUS_BAD_STATUS 124 -#define IB_MGMT_RMPP_STATUS_UNV 125 -#define IB_MGMT_RMPP_STATUS_TMR 126 -#define IB_MGMT_RMPP_STATUS_UNSPEC 127 - -#define IB_QP0 0 -#define IB_QP1 __constant_htonl(1) -#define IB_QP1_QKEY 0x80010000 -#define IB_QP_SET_QKEY 0x80000000 - -struct ib_mad_hdr { - u8 base_version; - u8 mgmt_class; - u8 class_version; - u8 method; - u16 status; - u16 class_specific; - u64 tid; - u16 attr_id; - u16 resv; - u32 attr_mod; -}; - -struct ib_rmpp_hdr { - u8 rmpp_version; - u8 rmpp_type; - u8 rmpp_rtime_flags; - u8 rmpp_status; - u32 
seg_num; - u32 paylen_newwin; -}; - -typedef u64 __bitwise ib_sa_comp_mask; - -#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n)) - -/* - * ib_sa_hdr and ib_sa_mad structures must be packed because they have - * 64-bit fields that are only 32-bit aligned. 64-bit architectures will - * lay them out wrong otherwise. (And unfortunately they are sent on - * the wire so we can't change the layout) - */ -struct ib_sa_hdr { - u64 sm_key; - u16 attr_offset; - u16 reserved; - ib_sa_comp_mask comp_mask; -} __attribute__ ((packed)); - -struct ib_mad { - struct ib_mad_hdr mad_hdr; - u8 data[232]; -}; - -struct ib_rmpp_mad { - struct ib_mad_hdr mad_hdr; - struct ib_rmpp_hdr rmpp_hdr; - u8 data[220]; -}; - -struct ib_sa_mad { - struct ib_mad_hdr mad_hdr; - struct ib_rmpp_hdr rmpp_hdr; - struct ib_sa_hdr sa_hdr; - u8 data[200]; -} __attribute__ ((packed)); - -struct ib_vendor_mad { - struct ib_mad_hdr mad_hdr; - struct ib_rmpp_hdr rmpp_hdr; - u8 reserved; - u8 oui[3]; - u8 data[216]; -}; - -/** - * ib_mad_send_buf - MAD data buffer and work request for sends. - * @mad: References an allocated MAD data buffer. The size of the data - * buffer is specified in the @send_wr.length field. - * @mapping: DMA mapping information. - * @mad_agent: MAD agent that allocated the buffer. - * @context: User-controlled context fields. - * @send_wr: An initialized work request structure used when sending the MAD. - * The wr_id field of the work request is initialized to reference this - * data structure. - * @sge: A scatter-gather list referenced by the work request. - * - * Users are responsible for initializing the MAD buffer itself, with the - * exception of specifying the payload length field in any RMPP MAD. - */ -struct ib_mad_send_buf { - struct ib_mad *mad; - DECLARE_PCI_UNMAP_ADDR(mapping) - struct ib_mad_agent *mad_agent; - void *context[2]; - struct ib_send_wr send_wr; - struct ib_sge sge; -}; - -/** - * ib_get_rmpp_resptime - Returns the RMPP response time. - * @rmpp_hdr: An RMPP header. - */ -static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr) -{ - return rmpp_hdr->rmpp_rtime_flags >> 3; -} - -/** - * ib_get_rmpp_flags - Returns the RMPP flags. - * @rmpp_hdr: An RMPP header. - */ -static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr) -{ - return rmpp_hdr->rmpp_rtime_flags & 0x7; -} - -/** - * ib_set_rmpp_resptime - Sets the response time in an RMPP header. - * @rmpp_hdr: An RMPP header. - * @rtime: The response time to set. - */ -static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime) -{ - rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3); -} - -/** - * ib_set_rmpp_flags - Sets the flags in an RMPP header. - * @rmpp_hdr: An RMPP header. - * @flags: The flags to set. - */ -static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags) -{ - rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) | - (flags & 0x7); -} - -struct ib_mad_agent; -struct ib_mad_send_wc; -struct ib_mad_recv_wc; - -/** - * ib_mad_send_handler - callback handler for a sent MAD. - * @mad_agent: MAD agent that sent the MAD. - * @mad_send_wc: Send work completion information on the sent MAD. - */ -typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent, - struct ib_mad_send_wc *mad_send_wc); - -/** - * ib_mad_snoop_handler - Callback handler for snooping sent MADs. - * @mad_agent: MAD agent that snooped the MAD. - * @send_wr: Work request information on the sent MAD. 
- * @mad_send_wc: Work completion information on the sent MAD. Valid - * only for snooping that occurs on a send completion. - * - * Clients snooping MADs should not modify data referenced by the @send_wr - * or @mad_send_wc. - */ -typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent, - struct ib_send_wr *send_wr, - struct ib_mad_send_wc *mad_send_wc); - -/** - * ib_mad_recv_handler - callback handler for a received MAD. - * @mad_agent: MAD agent requesting the received MAD. - * @mad_recv_wc: Received work completion information on the received MAD. - * - * MADs received in response to a send request operation will be handed to - * the user after the send operation completes. All data buffers given - * to registered agents through this routine are owned by the receiving - * client, except for snooping agents. Clients snooping MADs should not - * modify the data referenced by @mad_recv_wc. - */ -typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent, - struct ib_mad_recv_wc *mad_recv_wc); - -/** - * ib_mad_agent - Used to track MAD registration with the access layer. - * @device: Reference to device registration is on. - * @qp: Reference to QP used for sending and receiving MADs. - * @mr: Memory region for system memory usable for DMA. - * @recv_handler: Callback handler for a received MAD. - * @send_handler: Callback handler for a sent MAD. - * @snoop_handler: Callback handler for snooped sent MADs. - * @context: User-specified context associated with this registration. - * @hi_tid: Access layer assigned transaction ID for this client. - * Unsolicited MADs sent by this client will have the upper 32-bits - * of their TID set to this value. - * @port_num: Port number on which QP is registered - * @rmpp_version: If set, indicates the RMPP version used by this agent. - */ -struct ib_mad_agent { - struct ib_device *device; - struct ib_qp *qp; - struct ib_mr *mr; - ib_mad_recv_handler recv_handler; - ib_mad_send_handler send_handler; - ib_mad_snoop_handler snoop_handler; - void *context; - u32 hi_tid; - u8 port_num; - u8 rmpp_version; -}; - -/** - * ib_mad_send_wc - MAD send completion information. - * @wr_id: Work request identifier associated with the send MAD request. - * @status: Completion status. - * @vendor_err: Optional vendor error information returned with a failed - * request. - */ -struct ib_mad_send_wc { - u64 wr_id; - enum ib_wc_status status; - u32 vendor_err; -}; - -/** - * ib_mad_recv_buf - received MAD buffer information. - * @list: Reference to next data buffer for a received RMPP MAD. - * @grh: References a data buffer containing the global route header. - * The data refereced by this buffer is only valid if the GRH is - * valid. - * @mad: References the start of the received MAD. - */ -struct ib_mad_recv_buf { - struct list_head list; - struct ib_grh *grh; - struct ib_mad *mad; -}; - -/** - * ib_mad_recv_wc - received MAD information. - * @wc: Completion information for the received data. - * @recv_buf: Specifies the location of the received data buffer(s). - * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers. - * @mad_len: The length of the received MAD, without duplicated headers. - * - * For received response, the wr_id field of the wc is set to the wr_id - * for the corresponding send request. 
- */ -struct ib_mad_recv_wc { - struct ib_wc *wc; - struct ib_mad_recv_buf recv_buf; - struct list_head rmpp_list; - int mad_len; -}; - -/** - * ib_mad_reg_req - MAD registration request - * @mgmt_class: Indicates which management class of MADs should be receive - * by the caller. This field is only required if the user wishes to - * receive unsolicited MADs, otherwise it should be 0. - * @mgmt_class_version: Indicates which version of MADs for the given - * management class to receive. - * @oui: Indicates IEEE OUI when mgmt_class is a vendor class - * in the range from 0x30 to 0x4f. Otherwise not used. - * @method_mask: The caller will receive unsolicited MADs for any method - * where @method_mask = 1. - */ -struct ib_mad_reg_req { - u8 mgmt_class; - u8 mgmt_class_version; - u8 oui[3]; - DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS); -}; - -/** - * ib_register_mad_agent - Register to send/receive MADs. - * @device: The device to register with. - * @port_num: The port on the specified device to use. - * @qp_type: Specifies which QP to access. Must be either - * IB_QPT_SMI or IB_QPT_GSI. - * @mad_reg_req: Specifies which unsolicited MADs should be received - * by the caller. This parameter may be NULL if the caller only - * wishes to receive solicited responses. - * @rmpp_version: If set, indicates that the client will send - * and receive MADs that contain the RMPP header for the given version. - * If set to 0, indicates that RMPP is not used by this client. - * @send_handler: The completion callback routine invoked after a send - * request has completed. - * @recv_handler: The completion callback routine invoked for a received - * MAD. - * @context: User specified context associated with the registration. - */ -struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, - u8 port_num, - enum ib_qp_type qp_type, - struct ib_mad_reg_req *mad_reg_req, - u8 rmpp_version, - ib_mad_send_handler send_handler, - ib_mad_recv_handler recv_handler, - void *context); - -enum ib_mad_snoop_flags { - /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/ - /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/ - IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2), - /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/ - IB_MAD_SNOOP_RECVS = (1<<4) - /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/ - /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/ -}; - -/** - * ib_register_mad_snoop - Register to snoop sent and received MADs. - * @device: The device to register with. - * @port_num: The port on the specified device to use. - * @qp_type: Specifies which QP traffic to snoop. Must be either - * IB_QPT_SMI or IB_QPT_GSI. - * @mad_snoop_flags: Specifies information where snooping occurs. - * @send_handler: The callback routine invoked for a snooped send. - * @recv_handler: The callback routine invoked for a snooped receive. - * @context: User specified context associated with the registration. - */ -struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, - u8 port_num, - enum ib_qp_type qp_type, - int mad_snoop_flags, - ib_mad_snoop_handler snoop_handler, - ib_mad_recv_handler recv_handler, - void *context); - -/** - * ib_unregister_mad_agent - Unregisters a client from using MAD services. - * @mad_agent: Corresponding MAD registration request to deregister. - * - * After invoking this routine, MAD services are no longer usable by the - * client on the associated QP. - */ -int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent); - -/** - * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated - * with the registered client. 
- * @mad_agent: Specifies the associated registration to post the send to. - * @send_wr: Specifies the information needed to send the MAD(s). - * @bad_send_wr: Specifies the MAD on which an error was encountered. - * - * Sent MADs are not guaranteed to complete in the order that they were posted. - * - * If the MAD requires RMPP, the data buffer should contain a single copy - * of the common MAD, RMPP, and class specific headers, followed by the class - * defined data. If the class defined data would not divide evenly into - * RMPP segments, then space must be allocated at the end of the referenced - * buffer for any required padding. To indicate the amount of class defined - * data being transferred, the paylen_newwin field in the RMPP header should - * be set to the size of the class specific header plus the amount of class - * defined data being transferred. The paylen_newwin field should be - * specified in network-byte order. - */ -int ib_post_send_mad(struct ib_mad_agent *mad_agent, - struct ib_send_wr *send_wr, - struct ib_send_wr **bad_send_wr); - -/** - * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer. - * @mad_recv_wc: Work completion information for a received MAD. - * @buf: User-provided data buffer to receive the coalesced buffers. The - * referenced buffer should be at least the size of the mad_len specified - * by @mad_recv_wc. - * - * This call copies a chain of received MAD segments into a single data buffer, - * removing duplicated headers. - */ -void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf); - -/** - * ib_free_recv_mad - Returns data buffers used to receive a MAD. - * @mad_recv_wc: Work completion information for a received MAD. - * - * Clients receiving MADs through their ib_mad_recv_handler must call this - * routine to return the work completion buffers to the access layer. - */ -void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc); - -/** - * ib_cancel_mad - Cancels an outstanding send MAD operation. - * @mad_agent: Specifies the registration associated with sent MAD. - * @wr_id: Indicates the work request identifier of the MAD to cancel. - * - * MADs will be returned to the user through the corresponding - * ib_mad_send_handler. - */ -void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id); - -/** - * ib_modify_mad - Modifies an outstanding send MAD operation. - * @mad_agent: Specifies the registration associated with sent MAD. - * @wr_id: Indicates the work request identifier of the MAD to modify. - * @timeout_ms: New timeout value for sent MAD. - * - * This call will reset the timeout value for a sent MAD to the specified - * value. - */ -int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms); - -/** - * ib_redirect_mad_qp - Registers a QP for MAD services. - * @qp: Reference to a QP that requires MAD services. - * @rmpp_version: If set, indicates that the client will send - * and receive MADs that contain the RMPP header for the given version. - * If set to 0, indicates that RMPP is not used by this client. - * @send_handler: The completion callback routine invoked after a send - * request has completed. - * @recv_handler: The completion callback routine invoked for a received - * MAD. - * @context: User specified context associated with the registration. - * - * Use of this call allows clients to use MAD services, such as RMPP, - * on user-owned QPs. After calling this routine, users may send - * MADs on the specified QP by calling ib_mad_post_send. 
- */ -struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, - u8 rmpp_version, - ib_mad_send_handler send_handler, - ib_mad_recv_handler recv_handler, - void *context); - -/** - * ib_process_mad_wc - Processes a work completion associated with a - * MAD sent or received on a redirected QP. - * @mad_agent: Specifies the registered MAD service using the redirected QP. - * @wc: References a work completion associated with a sent or received - * MAD segment. - * - * This routine is used to complete or continue processing on a MAD request. - * If the work completion is associated with a send operation, calling - * this routine is required to continue an RMPP transfer or to wait for a - * corresponding response, if it is a request. If the work completion is - * associated with a receive operation, calling this routine is required to - * process an inbound or outbound RMPP transfer, or to match a response MAD - * with its corresponding request. - */ -int ib_process_mad_wc(struct ib_mad_agent *mad_agent, - struct ib_wc *wc); - -/** - * ib_create_send_mad - Allocate and initialize a data buffer and work request - * for sending a MAD. - * @mad_agent: Specifies the registered MAD service to associate with the MAD. - * @remote_qpn: Specifies the QPN of the receiving node. - * @pkey_index: Specifies which PKey the MAD will be sent using. This field - * is valid only if the remote_qpn is QP 1. - * @ah: References the address handle used to transfer to the remote node. - * @rmpp_active: Indicates if the send will enable RMPP. - * @hdr_len: Indicates the size of the data header of the MAD. This length - * should include the common MAD header, RMPP header, plus any class - * specific header. - * @data_len: Indicates the size of any user-transferred data. The call will - * automatically adjust the allocated buffer size to account for any - * additional padding that may be necessary. - * @gfp_mask: GFP mask used for the memory allocation. - * - * This is a helper routine that may be used to allocate a MAD. Users are - * not required to allocate outbound MADs using this call. The returned - * MAD send buffer will reference a data buffer usable for sending a MAD, along - * with an initialized work request structure. Users may modify the returned - * MAD data buffer or work request before posting the send. - * - * The returned data buffer will be cleared. Users are responsible for - * initializing the common MAD and any class specific headers. If @rmpp_active - * is set, the RMPP header will be initialized for sending. - */ -struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, - u32 remote_qpn, u16 pkey_index, - struct ib_ah *ah, int rmpp_active, - int hdr_len, int data_len, - unsigned int __nocast gfp_mask); - -/** - * ib_free_send_mad - Returns data buffers used to send a MAD. - * @send_buf: Previously allocated send data buffer. - */ -void ib_free_send_mad(struct ib_mad_send_buf *send_buf); - -#endif /* IB_MAD_H */ diff --git a/drivers/infiniband/include/ib_pack.h b/drivers/infiniband/include/ib_pack.h deleted file mode 100644 index fe480f3e8654..000000000000 --- a/drivers/infiniband/include/ib_pack.h +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
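Before the ib_pack.h removal continues, a sketch of the ib_create_send_mad()/ib_free_send_mad() lifecycle documented above; the IB_MGMT_MAD_HDR and IB_MGMT_MAD_DATA length constants are assumed from ib_mad.h, and ah is a pre-created address handle:

static int alloc_and_release_mad(struct ib_mad_agent *agent, u32 remote_qpn,
				 u16 pkey_index, struct ib_ah *ah)
{
	struct ib_mad_send_buf *msg;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah,
				 0 /* rmpp_active */, IB_MGMT_MAD_HDR,
				 IB_MGMT_MAD_DATA, GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/*
	 * A real caller would now fill in the common MAD header in the
	 * (zeroed) buffer and post it with ib_post_send_mad(); here we
	 * just return the buffer to show the error/cleanup path.
	 */
	ib_free_send_mad(msg);
	return 0;
}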
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $ - */ - -#ifndef IB_PACK_H -#define IB_PACK_H - -#include <ib_verbs.h> - -enum { - IB_LRH_BYTES = 8, - IB_GRH_BYTES = 40, - IB_BTH_BYTES = 12, - IB_DETH_BYTES = 8 -}; - -struct ib_field { - size_t struct_offset_bytes; - size_t struct_size_bytes; - int offset_words; - int offset_bits; - int size_bits; - char *field_name; -}; - -#define RESERVED \ - .field_name = "reserved" - -/* - * This macro cleans up the definitions of constants for BTH opcodes. - * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY, - * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives - * the correct value. - * - * In short, user code should use the constants defined using the - * macro rather than worrying about adding together other constants. 
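Concretely, the expansion of the macro defined just below reads:

/*
 * IB_OPCODE(UD, SEND_ONLY)
 *   => IB_OPCODE_UD_SEND_ONLY = IB_OPCODE_UD + IB_OPCODE_SEND_ONLY
 *   => 0x60 + 0x04 = 0x64
 *
 * so a consumer compares bth.opcode against IB_OPCODE_UD_SEND_ONLY
 * rather than adding transport and operation constants by hand.
 */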
-*/ -#define IB_OPCODE(transport, op) \ - IB_OPCODE_ ## transport ## _ ## op = \ - IB_OPCODE_ ## transport + IB_OPCODE_ ## op - -enum { - /* transport types -- just used to define real constants */ - IB_OPCODE_RC = 0x00, - IB_OPCODE_UC = 0x20, - IB_OPCODE_RD = 0x40, - IB_OPCODE_UD = 0x60, - - /* operations -- just used to define real constants */ - IB_OPCODE_SEND_FIRST = 0x00, - IB_OPCODE_SEND_MIDDLE = 0x01, - IB_OPCODE_SEND_LAST = 0x02, - IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03, - IB_OPCODE_SEND_ONLY = 0x04, - IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05, - IB_OPCODE_RDMA_WRITE_FIRST = 0x06, - IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07, - IB_OPCODE_RDMA_WRITE_LAST = 0x08, - IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09, - IB_OPCODE_RDMA_WRITE_ONLY = 0x0a, - IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b, - IB_OPCODE_RDMA_READ_REQUEST = 0x0c, - IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d, - IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e, - IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f, - IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10, - IB_OPCODE_ACKNOWLEDGE = 0x11, - IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12, - IB_OPCODE_COMPARE_SWAP = 0x13, - IB_OPCODE_FETCH_ADD = 0x14, - - /* real constants follow -- see comment about above IB_OPCODE() - macro for more details */ - - /* RC */ - IB_OPCODE(RC, SEND_FIRST), - IB_OPCODE(RC, SEND_MIDDLE), - IB_OPCODE(RC, SEND_LAST), - IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE), - IB_OPCODE(RC, SEND_ONLY), - IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE), - IB_OPCODE(RC, RDMA_WRITE_FIRST), - IB_OPCODE(RC, RDMA_WRITE_MIDDLE), - IB_OPCODE(RC, RDMA_WRITE_LAST), - IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE), - IB_OPCODE(RC, RDMA_WRITE_ONLY), - IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), - IB_OPCODE(RC, RDMA_READ_REQUEST), - IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST), - IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE), - IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST), - IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY), - IB_OPCODE(RC, ACKNOWLEDGE), - IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE), - IB_OPCODE(RC, COMPARE_SWAP), - IB_OPCODE(RC, FETCH_ADD), - - /* UC */ - IB_OPCODE(UC, SEND_FIRST), - IB_OPCODE(UC, SEND_MIDDLE), - IB_OPCODE(UC, SEND_LAST), - IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE), - IB_OPCODE(UC, SEND_ONLY), - IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE), - IB_OPCODE(UC, RDMA_WRITE_FIRST), - IB_OPCODE(UC, RDMA_WRITE_MIDDLE), - IB_OPCODE(UC, RDMA_WRITE_LAST), - IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE), - IB_OPCODE(UC, RDMA_WRITE_ONLY), - IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), - - /* RD */ - IB_OPCODE(RD, SEND_FIRST), - IB_OPCODE(RD, SEND_MIDDLE), - IB_OPCODE(RD, SEND_LAST), - IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE), - IB_OPCODE(RD, SEND_ONLY), - IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE), - IB_OPCODE(RD, RDMA_WRITE_FIRST), - IB_OPCODE(RD, RDMA_WRITE_MIDDLE), - IB_OPCODE(RD, RDMA_WRITE_LAST), - IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE), - IB_OPCODE(RD, RDMA_WRITE_ONLY), - IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE), - IB_OPCODE(RD, RDMA_READ_REQUEST), - IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST), - IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE), - IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST), - IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY), - IB_OPCODE(RD, ACKNOWLEDGE), - IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE), - IB_OPCODE(RD, COMPARE_SWAP), - IB_OPCODE(RD, FETCH_ADD), - - /* UD */ - IB_OPCODE(UD, SEND_ONLY), - IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE) -}; - -enum { - IB_LNH_RAW = 0, - IB_LNH_IP = 1, - IB_LNH_IBA_LOCAL = 2, - IB_LNH_IBA_GLOBAL = 3 -}; - -struct ib_unpacked_lrh { - u8 virtual_lane; - u8 link_version; - u8 service_level; - u8 
link_next_header; - __be16 destination_lid; - __be16 packet_length; - __be16 source_lid; -}; - -struct ib_unpacked_grh { - u8 ip_version; - u8 traffic_class; - __be32 flow_label; - __be16 payload_length; - u8 next_header; - u8 hop_limit; - union ib_gid source_gid; - union ib_gid destination_gid; -}; - -struct ib_unpacked_bth { - u8 opcode; - u8 solicited_event; - u8 mig_req; - u8 pad_count; - u8 transport_header_version; - __be16 pkey; - __be32 destination_qpn; - u8 ack_req; - __be32 psn; -}; - -struct ib_unpacked_deth { - __be32 qkey; - __be32 source_qpn; -}; - -struct ib_ud_header { - struct ib_unpacked_lrh lrh; - int grh_present; - struct ib_unpacked_grh grh; - struct ib_unpacked_bth bth; - struct ib_unpacked_deth deth; - int immediate_present; - __be32 immediate_data; -}; - -void ib_pack(const struct ib_field *desc, - int desc_len, - void *structure, - void *buf); - -void ib_unpack(const struct ib_field *desc, - int desc_len, - void *buf, - void *structure); - -void ib_ud_header_init(int payload_bytes, - int grh_present, - struct ib_ud_header *header); - -int ib_ud_header_pack(struct ib_ud_header *header, - void *buf); - -int ib_ud_header_unpack(void *buf, - struct ib_ud_header *header); - -#endif /* IB_PACK_H */ diff --git a/drivers/infiniband/include/ib_sa.h b/drivers/infiniband/include/ib_sa.h deleted file mode 100644 index 6d999f7b5d93..000000000000 --- a/drivers/infiniband/include/ib_sa.h +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id: ib_sa.h 2811 2005-07-06 18:11:43Z halr $ - */ - -#ifndef IB_SA_H -#define IB_SA_H - -#include <linux/compiler.h> - -#include <ib_verbs.h> -#include <ib_mad.h> - -enum { - IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */ - - IB_SA_METHOD_GET_TABLE = 0x12, - IB_SA_METHOD_GET_TABLE_RESP = 0x92, - IB_SA_METHOD_DELETE = 0x15 -}; - -enum ib_sa_selector { - IB_SA_GTE = 0, - IB_SA_LTE = 1, - IB_SA_EQ = 2, - /* - * The meaning of "best" depends on the attribute: for - * example, for MTU best will return the largest available - * MTU, while for packet life time, best will return the - * smallest available life time. 
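As a hedged illustration of the selector semantics (IB_SA_BEST is defined just below, and the IB_SA_PATH_REC_* component-mask bits further down in this header): ask the SA for the best available MTU but an exact rate.

static ib_sa_comp_mask best_mtu_exact_rate(struct ib_sa_path_rec *rec)
{
	memset(rec, 0, sizeof *rec);
	rec->mtu_selector  = IB_SA_BEST;	/* largest MTU available */
	rec->rate_selector = IB_SA_EQ;		/* exactly the rate below */
	rec->rate          = IB_SA_RATE_10_GBPS;
	/* mask to pass alongside rec in the query */
	return IB_SA_PATH_REC_MTU_SELECTOR |
	       IB_SA_PATH_REC_RATE_SELECTOR | IB_SA_PATH_REC_RATE;
}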
- */ - IB_SA_BEST = 3 -}; - -enum ib_sa_rate { - IB_SA_RATE_2_5_GBPS = 2, - IB_SA_RATE_5_GBPS = 5, - IB_SA_RATE_10_GBPS = 3, - IB_SA_RATE_20_GBPS = 6, - IB_SA_RATE_30_GBPS = 4, - IB_SA_RATE_40_GBPS = 7, - IB_SA_RATE_60_GBPS = 8, - IB_SA_RATE_80_GBPS = 9, - IB_SA_RATE_120_GBPS = 10 -}; - -static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate) -{ - switch (rate) { - case IB_SA_RATE_2_5_GBPS: return 1; - case IB_SA_RATE_5_GBPS: return 2; - case IB_SA_RATE_10_GBPS: return 4; - case IB_SA_RATE_20_GBPS: return 8; - case IB_SA_RATE_30_GBPS: return 12; - case IB_SA_RATE_40_GBPS: return 16; - case IB_SA_RATE_60_GBPS: return 24; - case IB_SA_RATE_80_GBPS: return 32; - case IB_SA_RATE_120_GBPS: return 48; - default: return -1; - } -} - -/* - * Structures for SA records are named "struct ib_sa_xxx_rec." No - * attempt is made to pack structures to match the physical layout of - * SA records in SA MADs; all packing and unpacking is handled by the - * SA query code. - * - * For a record with structure ib_sa_xxx_rec, the naming convention - * for the component mask value for field yyy is IB_SA_XXX_REC_YYY (we - * never use different abbreviations or otherwise change the spelling - * of xxx/yyy between ib_sa_xxx_rec.yyy and IB_SA_XXX_REC_YYY). - * - * Reserved rows are indicated with comments to help maintainability. - */ - -/* reserved: 0 */ -/* reserved: 1 */ -#define IB_SA_PATH_REC_DGID IB_SA_COMP_MASK( 2) -#define IB_SA_PATH_REC_SGID IB_SA_COMP_MASK( 3) -#define IB_SA_PATH_REC_DLID IB_SA_COMP_MASK( 4) -#define IB_SA_PATH_REC_SLID IB_SA_COMP_MASK( 5) -#define IB_SA_PATH_REC_RAW_TRAFFIC IB_SA_COMP_MASK( 6) -/* reserved: 7 */ -#define IB_SA_PATH_REC_FLOW_LABEL IB_SA_COMP_MASK( 8) -#define IB_SA_PATH_REC_HOP_LIMIT IB_SA_COMP_MASK( 9) -#define IB_SA_PATH_REC_TRAFFIC_CLASS IB_SA_COMP_MASK(10) -#define IB_SA_PATH_REC_REVERSIBLE IB_SA_COMP_MASK(11) -#define IB_SA_PATH_REC_NUMB_PATH IB_SA_COMP_MASK(12) -#define IB_SA_PATH_REC_PKEY IB_SA_COMP_MASK(13) -/* reserved: 14 */ -#define IB_SA_PATH_REC_SL IB_SA_COMP_MASK(15) -#define IB_SA_PATH_REC_MTU_SELECTOR IB_SA_COMP_MASK(16) -#define IB_SA_PATH_REC_MTU IB_SA_COMP_MASK(17) -#define IB_SA_PATH_REC_RATE_SELECTOR IB_SA_COMP_MASK(18) -#define IB_SA_PATH_REC_RATE IB_SA_COMP_MASK(19) -#define IB_SA_PATH_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(20) -#define IB_SA_PATH_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(21) -#define IB_SA_PATH_REC_PREFERENCE IB_SA_COMP_MASK(22) - -struct ib_sa_path_rec { - /* reserved */ - /* reserved */ - union ib_gid dgid; - union ib_gid sgid; - u16 dlid; - u16 slid; - int raw_traffic; - /* reserved */ - u32 flow_label; - u8 hop_limit; - u8 traffic_class; - int reversible; - u8 numb_path; - u16 pkey; - /* reserved */ - u8 sl; - u8 mtu_selector; - u8 mtu; - u8 rate_selector; - u8 rate; - u8 packet_life_time_selector; - u8 packet_life_time; - u8 preference; -}; - -#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0) -#define IB_SA_MCMEMBER_REC_PORT_GID IB_SA_COMP_MASK( 1) -#define IB_SA_MCMEMBER_REC_QKEY IB_SA_COMP_MASK( 2) -#define IB_SA_MCMEMBER_REC_MLID IB_SA_COMP_MASK( 3) -#define IB_SA_MCMEMBER_REC_MTU_SELECTOR IB_SA_COMP_MASK( 4) -#define IB_SA_MCMEMBER_REC_MTU IB_SA_COMP_MASK( 5) -#define IB_SA_MCMEMBER_REC_TRAFFIC_CLASS IB_SA_COMP_MASK( 6) -#define IB_SA_MCMEMBER_REC_PKEY IB_SA_COMP_MASK( 7) -#define IB_SA_MCMEMBER_REC_RATE_SELECTOR IB_SA_COMP_MASK( 8) -#define IB_SA_MCMEMBER_REC_RATE IB_SA_COMP_MASK( 9) -#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(10) -#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME 
IB_SA_COMP_MASK(11) -#define IB_SA_MCMEMBER_REC_SL IB_SA_COMP_MASK(12) -#define IB_SA_MCMEMBER_REC_FLOW_LABEL IB_SA_COMP_MASK(13) -#define IB_SA_MCMEMBER_REC_HOP_LIMIT IB_SA_COMP_MASK(14) -#define IB_SA_MCMEMBER_REC_SCOPE IB_SA_COMP_MASK(15) -#define IB_SA_MCMEMBER_REC_JOIN_STATE IB_SA_COMP_MASK(16) -#define IB_SA_MCMEMBER_REC_PROXY_JOIN IB_SA_COMP_MASK(17) - -struct ib_sa_mcmember_rec { - union ib_gid mgid; - union ib_gid port_gid; - u32 qkey; - u16 mlid; - u8 mtu_selector; - u8 mtu; - u8 traffic_class; - u16 pkey; - u8 rate_selector; - u8 rate; - u8 packet_life_time_selector; - u8 packet_life_time; - u8 sl; - u32 flow_label; - u8 hop_limit; - u8 scope; - u8 join_state; - int proxy_join; -}; - -/* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */ -#define IB_SA_SERVICE_REC_SERVICE_ID IB_SA_COMP_MASK( 0) -#define IB_SA_SERVICE_REC_SERVICE_GID IB_SA_COMP_MASK( 1) -#define IB_SA_SERVICE_REC_SERVICE_PKEY IB_SA_COMP_MASK( 2) -/* reserved: 3 */ -#define IB_SA_SERVICE_REC_SERVICE_LEASE IB_SA_COMP_MASK( 4) -#define IB_SA_SERVICE_REC_SERVICE_KEY IB_SA_COMP_MASK( 5) -#define IB_SA_SERVICE_REC_SERVICE_NAME IB_SA_COMP_MASK( 6) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_0 IB_SA_COMP_MASK( 7) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_1 IB_SA_COMP_MASK( 8) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_2 IB_SA_COMP_MASK( 9) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_3 IB_SA_COMP_MASK(10) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_4 IB_SA_COMP_MASK(11) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_5 IB_SA_COMP_MASK(12) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_6 IB_SA_COMP_MASK(13) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_7 IB_SA_COMP_MASK(14) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_8 IB_SA_COMP_MASK(15) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_9 IB_SA_COMP_MASK(16) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_10 IB_SA_COMP_MASK(17) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_11 IB_SA_COMP_MASK(18) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_12 IB_SA_COMP_MASK(19) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_13 IB_SA_COMP_MASK(20) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_14 IB_SA_COMP_MASK(21) -#define IB_SA_SERVICE_REC_SERVICE_DATA8_15 IB_SA_COMP_MASK(22) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_0 IB_SA_COMP_MASK(23) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_1 IB_SA_COMP_MASK(24) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_2 IB_SA_COMP_MASK(25) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_3 IB_SA_COMP_MASK(26) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_4 IB_SA_COMP_MASK(27) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_5 IB_SA_COMP_MASK(28) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_6 IB_SA_COMP_MASK(29) -#define IB_SA_SERVICE_REC_SERVICE_DATA16_7 IB_SA_COMP_MASK(30) -#define IB_SA_SERVICE_REC_SERVICE_DATA32_0 IB_SA_COMP_MASK(31) -#define IB_SA_SERVICE_REC_SERVICE_DATA32_1 IB_SA_COMP_MASK(32) -#define IB_SA_SERVICE_REC_SERVICE_DATA32_2 IB_SA_COMP_MASK(33) -#define IB_SA_SERVICE_REC_SERVICE_DATA32_3 IB_SA_COMP_MASK(34) -#define IB_SA_SERVICE_REC_SERVICE_DATA64_0 IB_SA_COMP_MASK(35) -#define IB_SA_SERVICE_REC_SERVICE_DATA64_1 IB_SA_COMP_MASK(36) - -#define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF - -struct ib_sa_service_rec { - u64 id; - union ib_gid gid; - u16 pkey; - /* reserved */ - u32 lease; - u8 key[16]; - u8 name[64]; - u8 data8[16]; - u16 data16[8]; - u32 data32[4]; - u64 data64[2]; -}; - -struct ib_sa_query; - -void ib_sa_cancel_query(int id, struct ib_sa_query *query); - -int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, - struct ib_sa_path_rec *rec, - ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast 
gfp_mask, - void (*callback)(int status, - struct ib_sa_path_rec *resp, - void *context), - void *context, - struct ib_sa_query **query); - -int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, - u8 method, - struct ib_sa_mcmember_rec *rec, - ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, - void (*callback)(int status, - struct ib_sa_mcmember_rec *resp, - void *context), - void *context, - struct ib_sa_query **query); - -int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, - u8 method, - struct ib_sa_service_rec *rec, - ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, - void (*callback)(int status, - struct ib_sa_service_rec *resp, - void *context), - void *context, - struct ib_sa_query **sa_query); - -/** - * ib_sa_mcmember_rec_set - Start an MCMember set query - * @device:device to send query on - * @port_num: port number to send query on - * @rec:MCMember Record to send in query - * @comp_mask:component mask to send in query - * @timeout_ms:time to wait for response - * @gfp_mask:GFP mask to use for internal allocations - * @callback:function called when query completes, times out or is - * canceled - * @context:opaque user context passed to callback - * @query:query context, used to cancel query - * - * Send an MCMember Set query to the SA (e.g. to join a multicast - * group). The callback function will be called when the query - * completes (or fails); status is 0 for a successful response, -EINTR - * if the query is canceled, -ETIMEDOUT if the query timed out, or - * -EIO if an error occurred sending the query. The resp parameter of - * the callback is only valid if status is 0. - * - * If the return value of ib_sa_mcmember_rec_set() is negative, it is - * an error code. Otherwise it is a query ID that can be used to - * cancel the query. - */ -static inline int -ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, - struct ib_sa_mcmember_rec *rec, - ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, - void (*callback)(int status, - struct ib_sa_mcmember_rec *resp, - void *context), - void *context, - struct ib_sa_query **query) -{ - return ib_sa_mcmember_rec_query(device, port_num, - IB_MGMT_METHOD_SET, - rec, comp_mask, - timeout_ms, gfp_mask, callback, - context, query); -} - -/** - * ib_sa_mcmember_rec_delete - Start an MCMember delete query - * @device:device to send query on - * @port_num: port number to send query on - * @rec:MCMember Record to send in query - * @comp_mask:component mask to send in query - * @timeout_ms:time to wait for response - * @gfp_mask:GFP mask to use for internal allocations - * @callback:function called when query completes, times out or is - * canceled - * @context:opaque user context passed to callback - * @query:query context, used to cancel query - * - * Send an MCMember Delete query to the SA (e.g. to leave a multicast - * group). The callback function will be called when the query - * completes (or fails); status is 0 for a successful response, -EINTR - * if the query is canceled, -ETIMEDOUT if the query timed out, or - * -EIO if an error occurred sending the query. The resp parameter of - * the callback is only valid if status is 0. - * - * If the return value of ib_sa_mcmember_rec_delete() is negative, it - * is an error code. Otherwise it is a query ID that can be used to - * cancel the query.
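Pulling these pieces together, a hedged sketch of an asynchronous path record lookup with ib_sa_path_rec_get(); the completion plumbing and the 1-second timeout are illustrative, and note that in this (old) header the lid fields are stored in network byte order:

static struct ib_sa_query *sa_query;

static void path_rec_done(int status, struct ib_sa_path_rec *resp,
			  void *context)
{
	struct completion *done = context;

	if (!status)	/* resp is valid only when status is 0 */
		printk(KERN_INFO "path resolved, dlid 0x%x\n",
		       be16_to_cpu(resp->dlid));
	complete(done);
}

static int resolve_path(struct ib_device *device, u8 port_num,
			struct ib_sa_path_rec *rec, struct completion *done)
{
	int id = ib_sa_path_rec_get(device, port_num, rec,
				    IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
				    1000 /* ms */, GFP_KERNEL,
				    path_rec_done, done, &sa_query);
	/* a non-negative id could later cancel via ib_sa_cancel_query() */
	return id < 0 ? id : 0;
}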
- */ -static inline int -ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, - struct ib_sa_mcmember_rec *rec, - ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, - void (*callback)(int status, - struct ib_sa_mcmember_rec *resp, - void *context), - void *context, - struct ib_sa_query **query) -{ - return ib_sa_mcmember_rec_query(device, port_num, - IB_SA_METHOD_DELETE, - rec, comp_mask, - timeout_ms, gfp_mask, callback, - context, query); -} - - -#endif /* IB_SA_H */ diff --git a/drivers/infiniband/include/ib_smi.h b/drivers/infiniband/include/ib_smi.h deleted file mode 100644 index ca8216514963..000000000000 --- a/drivers/infiniband/include/ib_smi.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id: ib_smi.h 1389 2004-12-27 22:56:47Z roland $ - */ - -#if !defined( IB_SMI_H ) -#define IB_SMI_H - -#include <ib_mad.h> - -#define IB_LID_PERMISSIVE 0xFFFF - -#define IB_SMP_DATA_SIZE 64 -#define IB_SMP_MAX_PATH_HOPS 64 - -struct ib_smp { - u8 base_version; - u8 mgmt_class; - u8 class_version; - u8 method; - u16 status; - u8 hop_ptr; - u8 hop_cnt; - u64 tid; - u16 attr_id; - u16 resv; - u32 attr_mod; - u64 mkey; - u16 dr_slid; - u16 dr_dlid; - u8 reserved[28]; - u8 data[IB_SMP_DATA_SIZE]; - u8 initial_path[IB_SMP_MAX_PATH_HOPS]; - u8 return_path[IB_SMP_MAX_PATH_HOPS]; -} __attribute__ ((packed)); - -#define IB_SMP_DIRECTION __constant_htons(0x8000) - -/* Subnet management attributes */ -#define IB_SMP_ATTR_NOTICE __constant_htons(0x0002) -#define IB_SMP_ATTR_NODE_DESC __constant_htons(0x0010) -#define IB_SMP_ATTR_NODE_INFO __constant_htons(0x0011) -#define IB_SMP_ATTR_SWITCH_INFO __constant_htons(0x0012) -#define IB_SMP_ATTR_GUID_INFO __constant_htons(0x0014) -#define IB_SMP_ATTR_PORT_INFO __constant_htons(0x0015) -#define IB_SMP_ATTR_PKEY_TABLE __constant_htons(0x0016) -#define IB_SMP_ATTR_SL_TO_VL_TABLE __constant_htons(0x0017) -#define IB_SMP_ATTR_VL_ARB_TABLE __constant_htons(0x0018) -#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE __constant_htons(0x0019) -#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE __constant_htons(0x001A) -#define IB_SMP_ATTR_MCAST_FORWARD_TABLE __constant_htons(0x001B) -#define IB_SMP_ATTR_SM_INFO __constant_htons(0x0020) -#define IB_SMP_ATTR_VENDOR_DIAG __constant_htons(0x0030) -#define IB_SMP_ATTR_LED_INFO __constant_htons(0x0031) -#define IB_SMP_ATTR_VENDOR_MASK __constant_htons(0xFF00) - -static inline u8 -ib_get_smp_direction(struct ib_smp *smp) -{ - return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION); -} - -#endif /* IB_SMI_H */ diff --git a/drivers/infiniband/include/ib_user_cm.h b/drivers/infiniband/include/ib_user_cm.h deleted file mode 100644 index 500b1af6ff77..000000000000 --- a/drivers/infiniband/include/ib_user_cm.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright (c) 2005 Topspin Communications. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
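As a sketch of how the struct ib_smp layout above gets used, initializing a directed-route Get(PortInfo); the subnet-management class and method constants (IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE, IB_MGMT_METHOD_GET) are assumed from ib_mad.h:

static void init_port_info_get(struct ib_smp *smp, u8 hop_cnt)
{
	memset(smp, 0, sizeof *smp);
	smp->base_version  = 1;
	smp->mgmt_class    = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method        = IB_MGMT_METHOD_GET;
	smp->attr_id       = IB_SMP_ATTR_PORT_INFO;
	smp->dr_slid       = IB_LID_PERMISSIVE;
	smp->dr_dlid       = IB_LID_PERMISSIVE;
	smp->hop_cnt       = hop_cnt;	/* entries to walk in initial_path[] */
}

ib_get_smp_direction() above then reports whether a returning SMP has the direction bit set in its status word.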
- * - * $Id: ib_user_cm.h 2576 2005-06-09 17:00:30Z libor $ - */ - -#ifndef IB_USER_CM_H -#define IB_USER_CM_H - -#include <linux/types.h> - -#define IB_USER_CM_ABI_VERSION 1 - -enum { - IB_USER_CM_CMD_CREATE_ID, - IB_USER_CM_CMD_DESTROY_ID, - IB_USER_CM_CMD_ATTR_ID, - - IB_USER_CM_CMD_LISTEN, - IB_USER_CM_CMD_ESTABLISH, - - IB_USER_CM_CMD_SEND_REQ, - IB_USER_CM_CMD_SEND_REP, - IB_USER_CM_CMD_SEND_RTU, - IB_USER_CM_CMD_SEND_DREQ, - IB_USER_CM_CMD_SEND_DREP, - IB_USER_CM_CMD_SEND_REJ, - IB_USER_CM_CMD_SEND_MRA, - IB_USER_CM_CMD_SEND_LAP, - IB_USER_CM_CMD_SEND_APR, - IB_USER_CM_CMD_SEND_SIDR_REQ, - IB_USER_CM_CMD_SEND_SIDR_REP, - - IB_USER_CM_CMD_EVENT, -}; -/* - * command ABI structures. - */ -struct ib_ucm_cmd_hdr { - __u32 cmd; - __u16 in; - __u16 out; -}; - -struct ib_ucm_create_id { - __u64 response; -}; - -struct ib_ucm_create_id_resp { - __u32 id; -}; - -struct ib_ucm_destroy_id { - __u32 id; -}; - -struct ib_ucm_attr_id { - __u64 response; - __u32 id; -}; - -struct ib_ucm_attr_id_resp { - __u64 service_id; - __u64 service_mask; - __u32 local_id; - __u32 remote_id; -}; - -struct ib_ucm_listen { - __u64 service_id; - __u64 service_mask; - __u32 id; -}; - -struct ib_ucm_establish { - __u32 id; -}; - -struct ib_ucm_private_data { - __u64 data; - __u32 id; - __u8 len; - __u8 reserved[3]; -}; - -struct ib_ucm_path_rec { - __u8 dgid[16]; - __u8 sgid[16]; - __u16 dlid; - __u16 slid; - __u32 raw_traffic; - __u32 flow_label; - __u32 reversible; - __u32 mtu; - __u16 pkey; - __u8 hop_limit; - __u8 traffic_class; - __u8 numb_path; - __u8 sl; - __u8 mtu_selector; - __u8 rate_selector; - __u8 rate; - __u8 packet_life_time_selector; - __u8 packet_life_time; - __u8 preference; -}; - -struct ib_ucm_req { - __u32 id; - __u32 qpn; - __u32 qp_type; - __u32 psn; - __u64 sid; - __u64 data; - __u64 primary_path; - __u64 alternate_path; - __u8 len; - __u8 peer_to_peer; - __u8 responder_resources; - __u8 initiator_depth; - __u8 remote_cm_response_timeout; - __u8 flow_control; - __u8 local_cm_response_timeout; - __u8 retry_count; - __u8 rnr_retry_count; - __u8 max_cm_retries; - __u8 srq; - __u8 reserved[1]; -}; - -struct ib_ucm_rep { - __u64 data; - __u32 id; - __u32 qpn; - __u32 psn; - __u8 len; - __u8 responder_resources; - __u8 initiator_depth; - __u8 target_ack_delay; - __u8 failover_accepted; - __u8 flow_control; - __u8 rnr_retry_count; - __u8 srq; -}; - -struct ib_ucm_info { - __u32 id; - __u32 status; - __u64 info; - __u64 data; - __u8 info_len; - __u8 data_len; - __u8 reserved[2]; -}; - -struct ib_ucm_mra { - __u64 data; - __u32 id; - __u8 len; - __u8 timeout; - __u8 reserved[2]; -}; - -struct ib_ucm_lap { - __u64 path; - __u64 data; - __u32 id; - __u8 len; - __u8 reserved[3]; -}; - -struct ib_ucm_sidr_req { - __u32 id; - __u32 timeout; - __u64 sid; - __u64 data; - __u64 path; - __u16 pkey; - __u8 len; - __u8 max_cm_retries; -}; - -struct ib_ucm_sidr_rep { - __u32 id; - __u32 qpn; - __u32 qkey; - __u32 status; - __u64 info; - __u64 data; - __u8 info_len; - __u8 data_len; - __u8 reserved[2]; -}; -/* - * event notification ABI structures. 
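Userspace drives this ABI by writing a command header plus body to the ucm device node; a hedged sketch under the assumption that hdr.in/hdr.out carry the body and response lengths (the fd setup and error handling are illustrative):

#include <unistd.h>

struct ucm_create {
	struct ib_ucm_cmd_hdr   hdr;
	struct ib_ucm_create_id cmd;
};

static int ucm_create_id(int fd, __u32 *id)
{
	struct ib_ucm_create_id_resp resp;
	struct ucm_create req;

	req.hdr.cmd = IB_USER_CM_CMD_CREATE_ID;
	req.hdr.in  = sizeof req.cmd;
	req.hdr.out = sizeof resp;
	req.cmd.response = (unsigned long) &resp;	/* pointer passed in a __u64 */

	if (write(fd, &req, sizeof req) != sizeof req)
		return -1;
	*id = resp.id;	/* kernel-assigned connection ID */
	return 0;
}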
- */ -struct ib_ucm_event_get { - __u64 response; - __u64 data; - __u64 info; - __u8 data_len; - __u8 info_len; - __u8 reserved[2]; -}; - -struct ib_ucm_req_event_resp { - __u32 listen_id; - /* device */ - /* port */ - struct ib_ucm_path_rec primary_path; - struct ib_ucm_path_rec alternate_path; - __u64 remote_ca_guid; - __u32 remote_qkey; - __u32 remote_qpn; - __u32 qp_type; - __u32 starting_psn; - __u8 responder_resources; - __u8 initiator_depth; - __u8 local_cm_response_timeout; - __u8 flow_control; - __u8 remote_cm_response_timeout; - __u8 retry_count; - __u8 rnr_retry_count; - __u8 srq; -}; - -struct ib_ucm_rep_event_resp { - __u64 remote_ca_guid; - __u32 remote_qkey; - __u32 remote_qpn; - __u32 starting_psn; - __u8 responder_resources; - __u8 initiator_depth; - __u8 target_ack_delay; - __u8 failover_accepted; - __u8 flow_control; - __u8 rnr_retry_count; - __u8 srq; - __u8 reserved[1]; -}; - -struct ib_ucm_rej_event_resp { - __u32 reason; - /* ari in ib_ucm_event_get info field. */ -}; - -struct ib_ucm_mra_event_resp { - __u8 timeout; - __u8 reserved[3]; -}; - -struct ib_ucm_lap_event_resp { - struct ib_ucm_path_rec path; -}; - -struct ib_ucm_apr_event_resp { - __u32 status; - /* apr info in ib_ucm_event_get info field. */ -}; - -struct ib_ucm_sidr_req_event_resp { - __u32 listen_id; - /* device */ - /* port */ - __u16 pkey; - __u8 reserved[2]; -}; - -struct ib_ucm_sidr_rep_event_resp { - __u32 status; - __u32 qkey; - __u32 qpn; - /* info in ib_ucm_event_get info field. */ -}; - -#define IB_UCM_PRES_DATA 0x01 -#define IB_UCM_PRES_INFO 0x02 -#define IB_UCM_PRES_PRIMARY 0x04 -#define IB_UCM_PRES_ALTERNATE 0x08 - -struct ib_ucm_event_resp { - __u32 id; - __u32 event; - __u32 present; - union { - struct ib_ucm_req_event_resp req_resp; - struct ib_ucm_rep_event_resp rep_resp; - struct ib_ucm_rej_event_resp rej_resp; - struct ib_ucm_mra_event_resp mra_resp; - struct ib_ucm_lap_event_resp lap_resp; - struct ib_ucm_apr_event_resp apr_resp; - - struct ib_ucm_sidr_req_event_resp sidr_req_resp; - struct ib_ucm_sidr_rep_event_resp sidr_rep_resp; - - __u32 send_status; - } u; -}; - -#endif /* IB_USER_CM_H */ diff --git a/drivers/infiniband/include/ib_user_mad.h b/drivers/infiniband/include/ib_user_mad.h deleted file mode 100644 index a9a56b50aacc..000000000000 --- a/drivers/infiniband/include/ib_user_mad.h +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
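On the read side, the IB_UCM_PRES_* bitmask above tells the consumer which optional buffers the kernel actually filled in; a sketch with hypothetical handlers, where data and info are the buffers supplied in the event-get request:

static void dispatch_ucm_event(struct ib_ucm_event_resp *resp,
			       void *data, void *info)
{
	if (resp->present & IB_UCM_PRES_DATA)
		handle_private_data(data);
	if (resp->present & IB_UCM_PRES_INFO)
		handle_info(info);
	if (resp->present & IB_UCM_PRES_PRIMARY)	/* e.g. a REQ event */
		handle_path(&resp->u.req_resp.primary_path);
}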
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id: ib_user_mad.h 2814 2005-07-06 19:14:09Z halr $ - */ - -#ifndef IB_USER_MAD_H -#define IB_USER_MAD_H - -#include <linux/types.h> -#include <linux/ioctl.h> - -/* - * Increment this value if any changes that break userspace ABI - * compatibility are made. - */ -#define IB_USER_MAD_ABI_VERSION 5 - -/* - * Make sure that all structs defined in this file remain laid out so - * that they pack the same way on 32-bit and 64-bit architectures (to - * avoid incompatibility between 32-bit userspace and 64-bit kernels). - */ - -/** - * ib_user_mad_hdr - MAD packet header - * @id - ID of agent MAD received with/to be sent with - * @status - 0 on successful receive, ETIMEDOUT if no response - * received (transaction ID in data[] will be set to TID of original - * request) (ignored on send) - * @timeout_ms - Milliseconds to wait for response (unset on receive) - * @retries - Number of automatic retries to attempt - * @qpn - Remote QP number received from/to be sent to - * @qkey - Remote Q_Key to be sent with (unset on receive) - * @lid - Remote lid received from/to be sent to - * @sl - Service level received with/to be sent with - * @path_bits - Local path bits received with/to be sent with - * @grh_present - If set, GRH was received/should be sent - * @gid_index - Local GID index to send with (unset on receive) - * @hop_limit - Hop limit in GRH - * @traffic_class - Traffic class in GRH - * @gid - Remote GID in GRH - * @flow_label - Flow label in GRH - * - * All multi-byte quantities are stored in network (big endian) byte order. - */ -struct ib_user_mad_hdr { - __u32 id; - __u32 status; - __u32 timeout_ms; - __u32 retries; - __u32 length; - __u32 qpn; - __u32 qkey; - __u16 lid; - __u8 sl; - __u8 path_bits; - __u8 grh_present; - __u8 gid_index; - __u8 hop_limit; - __u8 traffic_class; - __u8 gid[16]; - __u32 flow_label; -}; - -/** - * ib_user_mad - MAD packet - * @hdr - MAD packet header - * @data - Contents of MAD - * - */ -struct ib_user_mad { - struct ib_user_mad_hdr hdr; - __u8 data[0]; -}; - -/** - * ib_user_mad_reg_req - MAD registration request - * @id - Set by the kernel; used to identify agent in future requests. - * @qpn - Queue pair number; must be 0 or 1. - * @method_mask - The caller will receive unsolicited MADs for any method - * where the corresponding bit in @method_mask is set. - * @mgmt_class - Indicates which management class of MADs should be received - * by the caller. This field is only required if the user wishes to - * receive unsolicited MADs, otherwise it should be 0. - * @mgmt_class_version - Indicates which version of MADs for the given - * management class to receive. - * @oui: Indicates IEEE OUI when mgmt_class is a vendor class - * in the range from 0x30 to 0x4f. Otherwise not used. - * @rmpp_version: If set, indicates the RMPP version used.
- * - */ -struct ib_user_mad_reg_req { - __u32 id; - __u32 method_mask[4]; - __u8 qpn; - __u8 mgmt_class; - __u8 mgmt_class_version; - __u8 oui[3]; - __u8 rmpp_version; -}; - -#define IB_IOCTL_MAGIC 0x1b - -#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \ - struct ib_user_mad_reg_req) - -#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32) - -#endif /* IB_USER_MAD_H */ diff --git a/drivers/infiniband/include/ib_user_verbs.h b/drivers/infiniband/include/ib_user_verbs.h deleted file mode 100644 index 7c613706af72..000000000000 --- a/drivers/infiniband/include/ib_user_verbs.h +++ /dev/null @@ -1,389 +0,0 @@ -/* - * Copyright (c) 2005 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Cisco Systems. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * $Id: ib_user_verbs.h 2708 2005-06-24 17:27:21Z roland $ - */ - -#ifndef IB_USER_VERBS_H -#define IB_USER_VERBS_H - -#include <linux/types.h> - -/* - * Increment this value if any changes that break userspace ABI - * compatibility are made. 
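A userspace sketch of the registration ioctl above; the device path, the performance-management class value, and the method choice are illustrative assumptions:

#include <string.h>
#include <sys/ioctl.h>

static int register_umad_agent(int fd /* e.g. open("/dev/infiniband/umad0") */)
{
	struct ib_user_mad_reg_req req;

	memset(&req, 0, sizeof req);
	req.qpn = 1;			/* GSI traffic */
	req.mgmt_class = 0x04;		/* e.g. performance management */
	req.mgmt_class_version = 1;
	req.method_mask[0] = 1 << 1;	/* unsolicited Get (method 0x01) */

	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req) < 0)
		return -1;
	return req.id;			/* kernel-assigned agent id */
}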
- */ -#define IB_USER_VERBS_ABI_VERSION 1 - -enum { - IB_USER_VERBS_CMD_QUERY_PARAMS, - IB_USER_VERBS_CMD_GET_CONTEXT, - IB_USER_VERBS_CMD_QUERY_DEVICE, - IB_USER_VERBS_CMD_QUERY_PORT, - IB_USER_VERBS_CMD_QUERY_GID, - IB_USER_VERBS_CMD_QUERY_PKEY, - IB_USER_VERBS_CMD_ALLOC_PD, - IB_USER_VERBS_CMD_DEALLOC_PD, - IB_USER_VERBS_CMD_CREATE_AH, - IB_USER_VERBS_CMD_MODIFY_AH, - IB_USER_VERBS_CMD_QUERY_AH, - IB_USER_VERBS_CMD_DESTROY_AH, - IB_USER_VERBS_CMD_REG_MR, - IB_USER_VERBS_CMD_REG_SMR, - IB_USER_VERBS_CMD_REREG_MR, - IB_USER_VERBS_CMD_QUERY_MR, - IB_USER_VERBS_CMD_DEREG_MR, - IB_USER_VERBS_CMD_ALLOC_MW, - IB_USER_VERBS_CMD_BIND_MW, - IB_USER_VERBS_CMD_DEALLOC_MW, - IB_USER_VERBS_CMD_CREATE_CQ, - IB_USER_VERBS_CMD_RESIZE_CQ, - IB_USER_VERBS_CMD_DESTROY_CQ, - IB_USER_VERBS_CMD_POLL_CQ, - IB_USER_VERBS_CMD_PEEK_CQ, - IB_USER_VERBS_CMD_REQ_NOTIFY_CQ, - IB_USER_VERBS_CMD_CREATE_QP, - IB_USER_VERBS_CMD_QUERY_QP, - IB_USER_VERBS_CMD_MODIFY_QP, - IB_USER_VERBS_CMD_DESTROY_QP, - IB_USER_VERBS_CMD_POST_SEND, - IB_USER_VERBS_CMD_POST_RECV, - IB_USER_VERBS_CMD_ATTACH_MCAST, - IB_USER_VERBS_CMD_DETACH_MCAST -}; - -/* - * Make sure that all structs defined in this file remain laid out so - * that they pack the same way on 32-bit and 64-bit architectures (to - * avoid incompatibility between 32-bit userspace and 64-bit kernels). - * In particular do not use pointer types -- pass pointers in __u64 - * instead. - */ - -struct ib_uverbs_async_event_desc { - __u64 element; - __u32 event_type; /* enum ib_event_type */ - __u32 reserved; -}; - -struct ib_uverbs_comp_event_desc { - __u64 cq_handle; -}; - -/* - * All commands from userspace should start with a __u32 command field - * followed by __u16 in_words and out_words fields (which give the - * length of the command block and response buffer if any in 32-bit - * words). The kernel driver will read these fields first and read - * the rest of the command struct based on these value. - */ - -struct ib_uverbs_cmd_hdr { - __u32 command; - __u16 in_words; - __u16 out_words; -}; - -/* - * No driver_data for "query params" command, since this is intended - * to be a core function with no possible device dependence. 
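A hedged sketch of that framing from userspace, using the ALLOC_PD command structs defined further below; lengths are in 32-bit words and (per the layout rule above) the response pointer travels in a __u64, assuming in_words counts the header as well:

#include <unistd.h>

struct pd_req {
	struct ib_uverbs_cmd_hdr  hdr;
	struct ib_uverbs_alloc_pd cmd;
};

static int alloc_pd(int fd, __u32 *pd_handle)
{
	struct ib_uverbs_alloc_pd_resp resp;
	struct pd_req req;

	req.hdr.command   = IB_USER_VERBS_CMD_ALLOC_PD;
	req.hdr.in_words  = sizeof req / 4;	/* header included */
	req.hdr.out_words = sizeof resp / 4;
	req.cmd.response  = (unsigned long) &resp;

	if (write(fd, &req, sizeof req) != sizeof req)
		return -1;
	*pd_handle = resp.pd_handle;
	return 0;
}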
- */ -struct ib_uverbs_query_params { - __u64 response; -}; - -struct ib_uverbs_query_params_resp { - __u32 num_cq_events; -}; - -struct ib_uverbs_get_context { - __u64 response; - __u64 cq_fd_tab; - __u64 driver_data[0]; -}; - -struct ib_uverbs_get_context_resp { - __u32 async_fd; - __u32 reserved; -}; - -struct ib_uverbs_query_device { - __u64 response; - __u64 driver_data[0]; -}; - -struct ib_uverbs_query_device_resp { - __u64 fw_ver; - __u64 node_guid; - __u64 sys_image_guid; - __u64 max_mr_size; - __u64 page_size_cap; - __u32 vendor_id; - __u32 vendor_part_id; - __u32 hw_ver; - __u32 max_qp; - __u32 max_qp_wr; - __u32 device_cap_flags; - __u32 max_sge; - __u32 max_sge_rd; - __u32 max_cq; - __u32 max_cqe; - __u32 max_mr; - __u32 max_pd; - __u32 max_qp_rd_atom; - __u32 max_ee_rd_atom; - __u32 max_res_rd_atom; - __u32 max_qp_init_rd_atom; - __u32 max_ee_init_rd_atom; - __u32 atomic_cap; - __u32 max_ee; - __u32 max_rdd; - __u32 max_mw; - __u32 max_raw_ipv6_qp; - __u32 max_raw_ethy_qp; - __u32 max_mcast_grp; - __u32 max_mcast_qp_attach; - __u32 max_total_mcast_qp_attach; - __u32 max_ah; - __u32 max_fmr; - __u32 max_map_per_fmr; - __u32 max_srq; - __u32 max_srq_wr; - __u32 max_srq_sge; - __u16 max_pkeys; - __u8 local_ca_ack_delay; - __u8 phys_port_cnt; - __u8 reserved[4]; -}; - -struct ib_uverbs_query_port { - __u64 response; - __u8 port_num; - __u8 reserved[7]; - __u64 driver_data[0]; -}; - -struct ib_uverbs_query_port_resp { - __u32 port_cap_flags; - __u32 max_msg_sz; - __u32 bad_pkey_cntr; - __u32 qkey_viol_cntr; - __u32 gid_tbl_len; - __u16 pkey_tbl_len; - __u16 lid; - __u16 sm_lid; - __u8 state; - __u8 max_mtu; - __u8 active_mtu; - __u8 lmc; - __u8 max_vl_num; - __u8 sm_sl; - __u8 subnet_timeout; - __u8 init_type_reply; - __u8 active_width; - __u8 active_speed; - __u8 phys_state; - __u8 reserved[3]; -}; - -struct ib_uverbs_query_gid { - __u64 response; - __u8 port_num; - __u8 index; - __u8 reserved[6]; - __u64 driver_data[0]; -}; - -struct ib_uverbs_query_gid_resp { - __u8 gid[16]; -}; - -struct ib_uverbs_query_pkey { - __u64 response; - __u8 port_num; - __u8 index; - __u8 reserved[6]; - __u64 driver_data[0]; -}; - -struct ib_uverbs_query_pkey_resp { - __u16 pkey; - __u16 reserved; -}; - -struct ib_uverbs_alloc_pd { - __u64 response; - __u64 driver_data[0]; -}; - -struct ib_uverbs_alloc_pd_resp { - __u32 pd_handle; -}; - -struct ib_uverbs_dealloc_pd { - __u32 pd_handle; -}; - -struct ib_uverbs_reg_mr { - __u64 response; - __u64 start; - __u64 length; - __u64 hca_va; - __u32 pd_handle; - __u32 access_flags; - __u64 driver_data[0]; -}; - -struct ib_uverbs_reg_mr_resp { - __u32 mr_handle; - __u32 lkey; - __u32 rkey; -}; - -struct ib_uverbs_dereg_mr { - __u32 mr_handle; -}; - -struct ib_uverbs_create_cq { - __u64 response; - __u64 user_handle; - __u32 cqe; - __u32 event_handler; - __u64 driver_data[0]; -}; - -struct ib_uverbs_create_cq_resp { - __u32 cq_handle; - __u32 cqe; -}; - -struct ib_uverbs_destroy_cq { - __u32 cq_handle; -}; - -struct ib_uverbs_create_qp { - __u64 response; - __u64 user_handle; - __u32 pd_handle; - __u32 send_cq_handle; - __u32 recv_cq_handle; - __u32 srq_handle; - __u32 max_send_wr; - __u32 max_recv_wr; - __u32 max_send_sge; - __u32 max_recv_sge; - __u32 max_inline_data; - __u8 sq_sig_all; - __u8 qp_type; - __u8 is_srq; - __u8 reserved; - __u64 driver_data[0]; -}; - -struct ib_uverbs_create_qp_resp { - __u32 qp_handle; - __u32 qpn; -}; - -/* - * This struct needs to remain a multiple of 8 bytes to keep the - * alignment of the modify QP parameters. 
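That invariant, for the struct defined just below, lends itself to a compile-time check; a sketch using the kernel's BUILD_BUG_ON():

/* e.g. from an init function in the uverbs module: */
BUILD_BUG_ON(sizeof(struct ib_uverbs_qp_dest) % 8);	/* must stay 8-byte sized */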
- */ -struct ib_uverbs_qp_dest { - __u8 dgid[16]; - __u32 flow_label; - __u16 dlid; - __u16 reserved; - __u8 sgid_index; - __u8 hop_limit; - __u8 traffic_class; - __u8 sl; - __u8 src_path_bits; - __u8 static_rate; - __u8 is_global; - __u8 port_num; -}; - -struct ib_uverbs_modify_qp { - struct ib_uverbs_qp_dest dest; - struct ib_uverbs_qp_dest alt_dest; - __u32 qp_handle; - __u32 attr_mask; - __u32 qkey; - __u32 rq_psn; - __u32 sq_psn; - __u32 dest_qp_num; - __u32 qp_access_flags; - __u16 pkey_index; - __u16 alt_pkey_index; - __u8 qp_state; - __u8 cur_qp_state; - __u8 path_mtu; - __u8 path_mig_state; - __u8 en_sqd_async_notify; - __u8 max_rd_atomic; - __u8 max_dest_rd_atomic; - __u8 min_rnr_timer; - __u8 port_num; - __u8 timeout; - __u8 retry_cnt; - __u8 rnr_retry; - __u8 alt_port_num; - __u8 alt_timeout; - __u8 reserved[2]; - __u64 driver_data[0]; -}; - -struct ib_uverbs_modify_qp_resp { -}; - -struct ib_uverbs_destroy_qp { - __u32 qp_handle; -}; - -struct ib_uverbs_attach_mcast { - __u8 gid[16]; - __u32 qp_handle; - __u16 mlid; - __u16 reserved; - __u64 driver_data[0]; -}; - -struct ib_uverbs_detach_mcast { - __u8 gid[16]; - __u32 qp_handle; - __u16 mlid; - __u16 reserved; - __u64 driver_data[0]; -}; - -#endif /* IB_USER_VERBS_H */ diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h deleted file mode 100644 index 5d24edaa66e6..000000000000 --- a/drivers/infiniband/include/ib_verbs.h +++ /dev/null @@ -1,1365 +0,0 @@ -/* - * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2004 Infinicon Corporation. All rights reserved. - * Copyright (c) 2004 Intel Corporation. All rights reserved. - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. - * Copyright (c) 2005 Cisco Systems. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $ - */ - -#if !defined(IB_VERBS_H) -#define IB_VERBS_H - -#include <linux/types.h> -#include <linux/device.h> - -#include <asm/atomic.h> -#include <asm/scatterlist.h> -#include <asm/uaccess.h> - -union ib_gid { - u8 raw[16]; - struct { - u64 subnet_prefix; - u64 interface_id; - } global; -}; - -enum ib_node_type { - IB_NODE_CA = 1, - IB_NODE_SWITCH, - IB_NODE_ROUTER -}; - -enum ib_device_cap_flags { - IB_DEVICE_RESIZE_MAX_WR = 1, - IB_DEVICE_BAD_PKEY_CNTR = (1<<1), - IB_DEVICE_BAD_QKEY_CNTR = (1<<2), - IB_DEVICE_RAW_MULTI = (1<<3), - IB_DEVICE_AUTO_PATH_MIG = (1<<4), - IB_DEVICE_CHANGE_PHY_PORT = (1<<5), - IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6), - IB_DEVICE_CURR_QP_STATE_MOD = (1<<7), - IB_DEVICE_SHUTDOWN_PORT = (1<<8), - IB_DEVICE_INIT_TYPE = (1<<9), - IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10), - IB_DEVICE_SYS_IMAGE_GUID = (1<<11), - IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), - IB_DEVICE_SRQ_RESIZE = (1<<13), - IB_DEVICE_N_NOTIFY_CQ = (1<<14), -}; - -enum ib_atomic_cap { - IB_ATOMIC_NONE, - IB_ATOMIC_HCA, - IB_ATOMIC_GLOB -}; - -struct ib_device_attr { - u64 fw_ver; - u64 node_guid; - u64 sys_image_guid; - u64 max_mr_size; - u64 page_size_cap; - u32 vendor_id; - u32 vendor_part_id; - u32 hw_ver; - int max_qp; - int max_qp_wr; - int device_cap_flags; - int max_sge; - int max_sge_rd; - int max_cq; - int max_cqe; - int max_mr; - int max_pd; - int max_qp_rd_atom; - int max_ee_rd_atom; - int max_res_rd_atom; - int max_qp_init_rd_atom; - int max_ee_init_rd_atom; - enum ib_atomic_cap atomic_cap; - int max_ee; - int max_rdd; - int max_mw; - int max_raw_ipv6_qp; - int max_raw_ethy_qp; - int max_mcast_grp; - int max_mcast_qp_attach; - int max_total_mcast_qp_attach; - int max_ah; - int max_fmr; - int max_map_per_fmr; - int max_srq; - int max_srq_wr; - int max_srq_sge; - u16 max_pkeys; - u8 local_ca_ack_delay; -}; - -enum ib_mtu { - IB_MTU_256 = 1, - IB_MTU_512 = 2, - IB_MTU_1024 = 3, - IB_MTU_2048 = 4, - IB_MTU_4096 = 5 -}; - -static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) -{ - switch (mtu) { - case IB_MTU_256: return 256; - case IB_MTU_512: return 512; - case IB_MTU_1024: return 1024; - case IB_MTU_2048: return 2048; - case IB_MTU_4096: return 4096; - default: return -1; - } -} - -enum ib_port_state { - IB_PORT_NOP = 0, - IB_PORT_DOWN = 1, - IB_PORT_INIT = 2, - IB_PORT_ARMED = 3, - IB_PORT_ACTIVE = 4, - IB_PORT_ACTIVE_DEFER = 5 -}; - -enum ib_port_cap_flags { - IB_PORT_SM = 1 << 1, - IB_PORT_NOTICE_SUP = 1 << 2, - IB_PORT_TRAP_SUP = 1 << 3, - IB_PORT_OPT_IPD_SUP = 1 << 4, - IB_PORT_AUTO_MIGR_SUP = 1 << 5, - IB_PORT_SL_MAP_SUP = 1 << 6, - IB_PORT_MKEY_NVRAM = 1 << 7, - IB_PORT_PKEY_NVRAM = 1 << 8, - IB_PORT_LED_INFO_SUP = 1 << 9, - IB_PORT_SM_DISABLED = 1 << 10, - IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, - IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, - IB_PORT_CM_SUP = 1 << 16, - IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, - IB_PORT_REINIT_SUP = 1 << 18, - IB_PORT_DEVICE_MGMT_SUP = 1 << 19, - IB_PORT_VENDOR_CLASS_SUP = 1 << 20, - IB_PORT_DR_NOTICE_SUP = 1 << 21, - IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, - IB_PORT_BOOT_MGMT_SUP = 1 << 23, - IB_PORT_LINK_LATENCY_SUP = 1 << 24, - IB_PORT_CLIENT_REG_SUP = 1 << 25 -}; - -enum ib_port_width { - IB_WIDTH_1X = 1, - IB_WIDTH_4X = 2, - IB_WIDTH_8X = 4, - IB_WIDTH_12X = 8 -}; - -static inline int ib_width_enum_to_int(enum ib_port_width width) -{ - switch (width) { - case IB_WIDTH_1X: return 1; - case IB_WIDTH_4X: return 4; - case IB_WIDTH_8X: return 8; - case IB_WIDTH_12X: return 12; - default: return -1; - } -} - -struct 
ib_port_attr { - enum ib_port_state state; - enum ib_mtu max_mtu; - enum ib_mtu active_mtu; - int gid_tbl_len; - u32 port_cap_flags; - u32 max_msg_sz; - u32 bad_pkey_cntr; - u32 qkey_viol_cntr; - u16 pkey_tbl_len; - u16 lid; - u16 sm_lid; - u8 lmc; - u8 max_vl_num; - u8 sm_sl; - u8 subnet_timeout; - u8 init_type_reply; - u8 active_width; - u8 active_speed; - u8 phys_state; -}; - -enum ib_device_modify_flags { - IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 -}; - -struct ib_device_modify { - u64 sys_image_guid; -}; - -enum ib_port_modify_flags { - IB_PORT_SHUTDOWN = 1, - IB_PORT_INIT_TYPE = (1<<2), - IB_PORT_RESET_QKEY_CNTR = (1<<3) -}; - -struct ib_port_modify { - u32 set_port_cap_mask; - u32 clr_port_cap_mask; - u8 init_type; -}; - -enum ib_event_type { - IB_EVENT_CQ_ERR, - IB_EVENT_QP_FATAL, - IB_EVENT_QP_REQ_ERR, - IB_EVENT_QP_ACCESS_ERR, - IB_EVENT_COMM_EST, - IB_EVENT_SQ_DRAINED, - IB_EVENT_PATH_MIG, - IB_EVENT_PATH_MIG_ERR, - IB_EVENT_DEVICE_FATAL, - IB_EVENT_PORT_ACTIVE, - IB_EVENT_PORT_ERR, - IB_EVENT_LID_CHANGE, - IB_EVENT_PKEY_CHANGE, - IB_EVENT_SM_CHANGE -}; - -struct ib_event { - struct ib_device *device; - union { - struct ib_cq *cq; - struct ib_qp *qp; - u8 port_num; - } element; - enum ib_event_type event; -}; - -struct ib_event_handler { - struct ib_device *device; - void (*handler)(struct ib_event_handler *, struct ib_event *); - struct list_head list; -}; - -#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ - do { \ - (_ptr)->device = _device; \ - (_ptr)->handler = _handler; \ - INIT_LIST_HEAD(&(_ptr)->list); \ - } while (0) - -struct ib_global_route { - union ib_gid dgid; - u32 flow_label; - u8 sgid_index; - u8 hop_limit; - u8 traffic_class; -}; - -struct ib_grh { - u32 version_tclass_flow; - u16 paylen; - u8 next_hdr; - u8 hop_limit; - union ib_gid sgid; - union ib_gid dgid; -}; - -enum { - IB_MULTICAST_QPN = 0xffffff -}; - -enum ib_ah_flags { - IB_AH_GRH = 1 -}; - -struct ib_ah_attr { - struct ib_global_route grh; - u16 dlid; - u8 sl; - u8 src_path_bits; - u8 static_rate; - u8 ah_flags; - u8 port_num; -}; - -enum ib_wc_status { - IB_WC_SUCCESS, - IB_WC_LOC_LEN_ERR, - IB_WC_LOC_QP_OP_ERR, - IB_WC_LOC_EEC_OP_ERR, - IB_WC_LOC_PROT_ERR, - IB_WC_WR_FLUSH_ERR, - IB_WC_MW_BIND_ERR, - IB_WC_BAD_RESP_ERR, - IB_WC_LOC_ACCESS_ERR, - IB_WC_REM_INV_REQ_ERR, - IB_WC_REM_ACCESS_ERR, - IB_WC_REM_OP_ERR, - IB_WC_RETRY_EXC_ERR, - IB_WC_RNR_RETRY_EXC_ERR, - IB_WC_LOC_RDD_VIOL_ERR, - IB_WC_REM_INV_RD_REQ_ERR, - IB_WC_REM_ABORT_ERR, - IB_WC_INV_EECN_ERR, - IB_WC_INV_EEC_STATE_ERR, - IB_WC_FATAL_ERR, - IB_WC_RESP_TIMEOUT_ERR, - IB_WC_GENERAL_ERR -}; - -enum ib_wc_opcode { - IB_WC_SEND, - IB_WC_RDMA_WRITE, - IB_WC_RDMA_READ, - IB_WC_COMP_SWAP, - IB_WC_FETCH_ADD, - IB_WC_BIND_MW, -/* - * Set value of IB_WC_RECV so consumers can test if a completion is a - * receive by testing (opcode & IB_WC_RECV). 
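In consumer code the test reads as below; ib_poll_cq() is the polling verb from this header, and the two handlers are hypothetical:

static void drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.opcode & IB_WC_RECV)
			handle_recv(&wc);	/* any flavor of receive */
		else
			handle_send_done(&wc);
	}
}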
- */ - IB_WC_RECV = 1 << 7, - IB_WC_RECV_RDMA_WITH_IMM -}; - -enum ib_wc_flags { - IB_WC_GRH = 1, - IB_WC_WITH_IMM = (1<<1) -}; - -struct ib_wc { - u64 wr_id; - enum ib_wc_status status; - enum ib_wc_opcode opcode; - u32 vendor_err; - u32 byte_len; - __be32 imm_data; - u32 qp_num; - u32 src_qp; - int wc_flags; - u16 pkey_index; - u16 slid; - u8 sl; - u8 dlid_path_bits; - u8 port_num; /* valid only for DR SMPs on switches */ -}; - -enum ib_cq_notify { - IB_CQ_SOLICITED, - IB_CQ_NEXT_COMP -}; - -struct ib_qp_cap { - u32 max_send_wr; - u32 max_recv_wr; - u32 max_send_sge; - u32 max_recv_sge; - u32 max_inline_data; -}; - -enum ib_sig_type { - IB_SIGNAL_ALL_WR, - IB_SIGNAL_REQ_WR -}; - -enum ib_qp_type { - /* - * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries - * here (and in that order) since the MAD layer uses them as - * indices into a 2-entry table. - */ - IB_QPT_SMI, - IB_QPT_GSI, - - IB_QPT_RC, - IB_QPT_UC, - IB_QPT_UD, - IB_QPT_RAW_IPV6, - IB_QPT_RAW_ETY -}; - -struct ib_qp_init_attr { - void (*event_handler)(struct ib_event *, void *); - void *qp_context; - struct ib_cq *send_cq; - struct ib_cq *recv_cq; - struct ib_srq *srq; - struct ib_qp_cap cap; - enum ib_sig_type sq_sig_type; - enum ib_qp_type qp_type; - u8 port_num; /* special QP types only */ -}; - -enum ib_rnr_timeout { - IB_RNR_TIMER_655_36 = 0, - IB_RNR_TIMER_000_01 = 1, - IB_RNR_TIMER_000_02 = 2, - IB_RNR_TIMER_000_03 = 3, - IB_RNR_TIMER_000_04 = 4, - IB_RNR_TIMER_000_06 = 5, - IB_RNR_TIMER_000_08 = 6, - IB_RNR_TIMER_000_12 = 7, - IB_RNR_TIMER_000_16 = 8, - IB_RNR_TIMER_000_24 = 9, - IB_RNR_TIMER_000_32 = 10, - IB_RNR_TIMER_000_48 = 11, - IB_RNR_TIMER_000_64 = 12, - IB_RNR_TIMER_000_96 = 13, - IB_RNR_TIMER_001_28 = 14, - IB_RNR_TIMER_001_92 = 15, - IB_RNR_TIMER_002_56 = 16, - IB_RNR_TIMER_003_84 = 17, - IB_RNR_TIMER_005_12 = 18, - IB_RNR_TIMER_007_68 = 19, - IB_RNR_TIMER_010_24 = 20, - IB_RNR_TIMER_015_36 = 21, - IB_RNR_TIMER_020_48 = 22, - IB_RNR_TIMER_030_72 = 23, - IB_RNR_TIMER_040_96 = 24, - IB_RNR_TIMER_061_44 = 25, - IB_RNR_TIMER_081_92 = 26, - IB_RNR_TIMER_122_88 = 27, - IB_RNR_TIMER_163_84 = 28, - IB_RNR_TIMER_245_76 = 29, - IB_RNR_TIMER_327_68 = 30, - IB_RNR_TIMER_491_52 = 31 -}; - -enum ib_qp_attr_mask { - IB_QP_STATE = 1, - IB_QP_CUR_STATE = (1<<1), - IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), - IB_QP_ACCESS_FLAGS = (1<<3), - IB_QP_PKEY_INDEX = (1<<4), - IB_QP_PORT = (1<<5), - IB_QP_QKEY = (1<<6), - IB_QP_AV = (1<<7), - IB_QP_PATH_MTU = (1<<8), - IB_QP_TIMEOUT = (1<<9), - IB_QP_RETRY_CNT = (1<<10), - IB_QP_RNR_RETRY = (1<<11), - IB_QP_RQ_PSN = (1<<12), - IB_QP_MAX_QP_RD_ATOMIC = (1<<13), - IB_QP_ALT_PATH = (1<<14), - IB_QP_MIN_RNR_TIMER = (1<<15), - IB_QP_SQ_PSN = (1<<16), - IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), - IB_QP_PATH_MIG_STATE = (1<<18), - IB_QP_CAP = (1<<19), - IB_QP_DEST_QPN = (1<<20) -}; - -enum ib_qp_state { - IB_QPS_RESET, - IB_QPS_INIT, - IB_QPS_RTR, - IB_QPS_RTS, - IB_QPS_SQD, - IB_QPS_SQE, - IB_QPS_ERR -}; - -enum ib_mig_state { - IB_MIG_MIGRATED, - IB_MIG_REARM, - IB_MIG_ARMED -}; - -struct ib_qp_attr { - enum ib_qp_state qp_state; - enum ib_qp_state cur_qp_state; - enum ib_mtu path_mtu; - enum ib_mig_state path_mig_state; - u32 qkey; - u32 rq_psn; - u32 sq_psn; - u32 dest_qp_num; - int qp_access_flags; - struct ib_qp_cap cap; - struct ib_ah_attr ah_attr; - struct ib_ah_attr alt_ah_attr; - u16 pkey_index; - u16 alt_pkey_index; - u8 en_sqd_async_notify; - u8 sq_draining; - u8 max_rd_atomic; - u8 max_dest_rd_atomic; - u8 min_rnr_timer; - u8 port_num; - u8 timeout; - u8 retry_cnt; - u8 rnr_retry; - 
u8 alt_port_num; - u8 alt_timeout; -}; - -enum ib_wr_opcode { - IB_WR_RDMA_WRITE, - IB_WR_RDMA_WRITE_WITH_IMM, - IB_WR_SEND, - IB_WR_SEND_WITH_IMM, - IB_WR_RDMA_READ, - IB_WR_ATOMIC_CMP_AND_SWP, - IB_WR_ATOMIC_FETCH_AND_ADD -}; - -enum ib_send_flags { - IB_SEND_FENCE = 1, - IB_SEND_SIGNALED = (1<<1), - IB_SEND_SOLICITED = (1<<2), - IB_SEND_INLINE = (1<<3) -}; - -struct ib_sge { - u64 addr; - u32 length; - u32 lkey; -}; - -struct ib_send_wr { - struct ib_send_wr *next; - u64 wr_id; - struct ib_sge *sg_list; - int num_sge; - enum ib_wr_opcode opcode; - int send_flags; - __be32 imm_data; - union { - struct { - u64 remote_addr; - u32 rkey; - } rdma; - struct { - u64 remote_addr; - u64 compare_add; - u64 swap; - u32 rkey; - } atomic; - struct { - struct ib_ah *ah; - struct ib_mad_hdr *mad_hdr; - u32 remote_qpn; - u32 remote_qkey; - int timeout_ms; /* valid for MADs only */ - int retries; /* valid for MADs only */ - u16 pkey_index; /* valid for GSI only */ - u8 port_num; /* valid for DR SMPs on switch only */ - } ud; - } wr; -}; - -struct ib_recv_wr { - struct ib_recv_wr *next; - u64 wr_id; - struct ib_sge *sg_list; - int num_sge; -}; - -enum ib_access_flags { - IB_ACCESS_LOCAL_WRITE = 1, - IB_ACCESS_REMOTE_WRITE = (1<<1), - IB_ACCESS_REMOTE_READ = (1<<2), - IB_ACCESS_REMOTE_ATOMIC = (1<<3), - IB_ACCESS_MW_BIND = (1<<4) -}; - -struct ib_phys_buf { - u64 addr; - u64 size; -}; - -struct ib_mr_attr { - struct ib_pd *pd; - u64 device_virt_addr; - u64 size; - int mr_access_flags; - u32 lkey; - u32 rkey; -}; - -enum ib_mr_rereg_flags { - IB_MR_REREG_TRANS = 1, - IB_MR_REREG_PD = (1<<1), - IB_MR_REREG_ACCESS = (1<<2) -}; - -struct ib_mw_bind { - struct ib_mr *mr; - u64 wr_id; - u64 addr; - u32 length; - int send_flags; - int mw_access_flags; -}; - -struct ib_fmr_attr { - int max_pages; - int max_maps; - u8 page_size; -}; - -struct ib_ucontext { - struct ib_device *device; - struct list_head pd_list; - struct list_head mr_list; - struct list_head mw_list; - struct list_head cq_list; - struct list_head qp_list; - struct list_head srq_list; - struct list_head ah_list; - spinlock_t lock; -}; - -struct ib_uobject { - u64 user_handle; /* handle given to us by userspace */ - struct ib_ucontext *context; /* associated user context */ - struct list_head list; /* link to context's list */ - u32 id; /* index into kernel idr */ -}; - -struct ib_umem { - unsigned long user_base; - unsigned long virt_base; - size_t length; - int offset; - int page_size; - int writable; - struct list_head chunk_list; -}; - -struct ib_umem_chunk { - struct list_head list; - int nents; - int nmap; - struct scatterlist page_list[0]; -}; - -struct ib_udata { - void __user *inbuf; - void __user *outbuf; - size_t inlen; - size_t outlen; -}; - -#define IB_UMEM_MAX_PAGE_CHUNK \ - ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \ - ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \ - (void *) &((struct ib_umem_chunk *) 0)->page_list[0])) - -struct ib_umem_object { - struct ib_uobject uobject; - struct ib_umem umem; -}; - -struct ib_pd { - struct ib_device *device; - struct ib_uobject *uobject; - atomic_t usecnt; /* count all resources */ -}; - -struct ib_ah { - struct ib_device *device; - struct ib_pd *pd; - struct ib_uobject *uobject; -}; - -typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); - -struct ib_cq { - struct ib_device *device; - struct ib_uobject *uobject; - ib_comp_handler comp_handler; - void (*event_handler)(struct ib_event *, void *); - void * cq_context; - int cqe; - atomic_t usecnt; /* 
count number of work queues */ -}; - -struct ib_srq { - struct ib_device *device; - struct ib_uobject *uobject; - struct ib_pd *pd; - void *srq_context; - atomic_t usecnt; -}; - -struct ib_qp { - struct ib_device *device; - struct ib_pd *pd; - struct ib_cq *send_cq; - struct ib_cq *recv_cq; - struct ib_srq *srq; - struct ib_uobject *uobject; - void (*event_handler)(struct ib_event *, void *); - void *qp_context; - u32 qp_num; - enum ib_qp_type qp_type; -}; - -struct ib_mr { - struct ib_device *device; - struct ib_pd *pd; - struct ib_uobject *uobject; - u32 lkey; - u32 rkey; - atomic_t usecnt; /* count number of MWs */ -}; - -struct ib_mw { - struct ib_device *device; - struct ib_pd *pd; - struct ib_uobject *uobject; - u32 rkey; -}; - -struct ib_fmr { - struct ib_device *device; - struct ib_pd *pd; - struct list_head list; - u32 lkey; - u32 rkey; -}; - -struct ib_mad; -struct ib_grh; - -enum ib_process_mad_flags { - IB_MAD_IGNORE_MKEY = 1, - IB_MAD_IGNORE_BKEY = 2, - IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY -}; - -enum ib_mad_result { - IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ - IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ - IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ - IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ -}; - -#define IB_DEVICE_NAME_MAX 64 - -struct ib_cache { - rwlock_t lock; - struct ib_event_handler event_handler; - struct ib_pkey_cache **pkey_cache; - struct ib_gid_cache **gid_cache; -}; - -struct ib_device { - struct device *dma_device; - - char name[IB_DEVICE_NAME_MAX]; - - struct list_head event_handler_list; - spinlock_t event_handler_lock; - - struct list_head core_list; - struct list_head client_data_list; - spinlock_t client_data_lock; - - struct ib_cache cache; - - u32 flags; - - int (*query_device)(struct ib_device *device, - struct ib_device_attr *device_attr); - int (*query_port)(struct ib_device *device, - u8 port_num, - struct ib_port_attr *port_attr); - int (*query_gid)(struct ib_device *device, - u8 port_num, int index, - union ib_gid *gid); - int (*query_pkey)(struct ib_device *device, - u8 port_num, u16 index, u16 *pkey); - int (*modify_device)(struct ib_device *device, - int device_modify_mask, - struct ib_device_modify *device_modify); - int (*modify_port)(struct ib_device *device, - u8 port_num, int port_modify_mask, - struct ib_port_modify *port_modify); - struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, - struct ib_udata *udata); - int (*dealloc_ucontext)(struct ib_ucontext *context); - int (*mmap)(struct ib_ucontext *context, - struct vm_area_struct *vma); - struct ib_pd * (*alloc_pd)(struct ib_device *device, - struct ib_ucontext *context, - struct ib_udata *udata); - int (*dealloc_pd)(struct ib_pd *pd); - struct ib_ah * (*create_ah)(struct ib_pd *pd, - struct ib_ah_attr *ah_attr); - int (*modify_ah)(struct ib_ah *ah, - struct ib_ah_attr *ah_attr); - int (*query_ah)(struct ib_ah *ah, - struct ib_ah_attr *ah_attr); - int (*destroy_ah)(struct ib_ah *ah); - struct ib_qp * (*create_qp)(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata); - int (*modify_qp)(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask); - int (*query_qp)(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr); - int (*destroy_qp)(struct ib_qp *qp); - int (*post_send)(struct ib_qp *qp, - struct ib_send_wr *send_wr, - struct ib_send_wr **bad_send_wr); - int 
(*post_recv)(struct ib_qp *qp, - struct ib_recv_wr *recv_wr, - struct ib_recv_wr **bad_recv_wr); - struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, - struct ib_ucontext *context, - struct ib_udata *udata); - int (*destroy_cq)(struct ib_cq *cq); - int (*resize_cq)(struct ib_cq *cq, int *cqe); - int (*poll_cq)(struct ib_cq *cq, int num_entries, - struct ib_wc *wc); - int (*peek_cq)(struct ib_cq *cq, int wc_cnt); - int (*req_notify_cq)(struct ib_cq *cq, - enum ib_cq_notify cq_notify); - int (*req_ncomp_notif)(struct ib_cq *cq, - int wc_cnt); - struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, - int mr_access_flags); - struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, - struct ib_phys_buf *phys_buf_array, - int num_phys_buf, - int mr_access_flags, - u64 *iova_start); - struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, - struct ib_umem *region, - int mr_access_flags, - struct ib_udata *udata); - int (*query_mr)(struct ib_mr *mr, - struct ib_mr_attr *mr_attr); - int (*dereg_mr)(struct ib_mr *mr); - int (*rereg_phys_mr)(struct ib_mr *mr, - int mr_rereg_mask, - struct ib_pd *pd, - struct ib_phys_buf *phys_buf_array, - int num_phys_buf, - int mr_access_flags, - u64 *iova_start); - struct ib_mw * (*alloc_mw)(struct ib_pd *pd); - int (*bind_mw)(struct ib_qp *qp, - struct ib_mw *mw, - struct ib_mw_bind *mw_bind); - int (*dealloc_mw)(struct ib_mw *mw); - struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, - int mr_access_flags, - struct ib_fmr_attr *fmr_attr); - int (*map_phys_fmr)(struct ib_fmr *fmr, - u64 *page_list, int list_len, - u64 iova); - int (*unmap_fmr)(struct list_head *fmr_list); - int (*dealloc_fmr)(struct ib_fmr *fmr); - int (*attach_mcast)(struct ib_qp *qp, - union ib_gid *gid, - u16 lid); - int (*detach_mcast)(struct ib_qp *qp, - union ib_gid *gid, - u16 lid); - int (*process_mad)(struct ib_device *device, - int process_mad_flags, - u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, - struct ib_mad *out_mad); - - struct module *owner; - struct class_device class_dev; - struct kobject ports_parent; - struct list_head port_list; - - enum { - IB_DEV_UNINITIALIZED, - IB_DEV_REGISTERED, - IB_DEV_UNREGISTERED - } reg_state; - - u8 node_type; - u8 phys_port_cnt; -}; - -struct ib_client { - char *name; - void (*add) (struct ib_device *); - void (*remove)(struct ib_device *); - - struct list_head list; -}; - -struct ib_device *ib_alloc_device(size_t size); -void ib_dealloc_device(struct ib_device *device); - -int ib_register_device (struct ib_device *device); -void ib_unregister_device(struct ib_device *device); - -int ib_register_client (struct ib_client *client); -void ib_unregister_client(struct ib_client *client); - -void *ib_get_client_data(struct ib_device *device, struct ib_client *client); -void ib_set_client_data(struct ib_device *device, struct ib_client *client, - void *data); - -static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) -{ - return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; -} - -static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) -{ - return copy_to_user(udata->outbuf, src, len) ? 
-EFAULT : 0; -} - -int ib_register_event_handler (struct ib_event_handler *event_handler); -int ib_unregister_event_handler(struct ib_event_handler *event_handler); -void ib_dispatch_event(struct ib_event *event); - -int ib_query_device(struct ib_device *device, - struct ib_device_attr *device_attr); - -int ib_query_port(struct ib_device *device, - u8 port_num, struct ib_port_attr *port_attr); - -int ib_query_gid(struct ib_device *device, - u8 port_num, int index, union ib_gid *gid); - -int ib_query_pkey(struct ib_device *device, - u8 port_num, u16 index, u16 *pkey); - -int ib_modify_device(struct ib_device *device, - int device_modify_mask, - struct ib_device_modify *device_modify); - -int ib_modify_port(struct ib_device *device, - u8 port_num, int port_modify_mask, - struct ib_port_modify *port_modify); - -/** - * ib_alloc_pd - Allocates an unused protection domain. - * @device: The device on which to allocate the protection domain. - * - * A protection domain object provides an association between QPs, shared - * receive queues, address handles, memory regions, and memory windows. - */ -struct ib_pd *ib_alloc_pd(struct ib_device *device); - -/** - * ib_dealloc_pd - Deallocates a protection domain. - * @pd: The protection domain to deallocate. - */ -int ib_dealloc_pd(struct ib_pd *pd); - -/** - * ib_create_ah - Creates an address handle for the given address vector. - * @pd: The protection domain associated with the address handle. - * @ah_attr: The attributes of the address vector. - * - * The address handle is used to reference a local or global destination - * in all UD QP post sends. - */ -struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); - -/** - * ib_create_ah_from_wc - Creates an address handle associated with the - * sender of the specified work completion. - * @pd: The protection domain associated with the address handle. - * @wc: Work completion information associated with a received message. - * @grh: References the received global route header. This parameter is - * ignored unless the work completion indicates that the GRH is valid. - * @port_num: The outbound port number to associate with the address. - * - * The address handle is used to reference a local or global destination - * in all UD QP post sends. - */ -struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, - struct ib_grh *grh, u8 port_num); - -/** - * ib_modify_ah - Modifies the address vector associated with an address - * handle. - * @ah: The address handle to modify. - * @ah_attr: The new address vector attributes to associate with the - * address handle. - */ -int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); - -/** - * ib_query_ah - Queries the address vector associated with an address - * handle. - * @ah: The address handle to query. - * @ah_attr: The address vector attributes associated with the address - * handle. - */ -int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); - -/** - * ib_destroy_ah - Destroys an address handle. - * @ah: The address handle to destroy. - */ -int ib_destroy_ah(struct ib_ah *ah); - -/** - * ib_create_qp - Creates a QP associated with the specified protection - * domain. - * @pd: The protection domain associated with the QP. - * @qp_init_attr: A list of initial attributes required to create the QP. - */ -struct ib_qp *ib_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr); - -/** - * ib_modify_qp - Modifies the attributes for the specified QP and then - * transitions the QP to the given state. 
- * @qp: The QP to modify. - * @qp_attr: On input, specifies the QP attributes to modify. On output, - * the current values of selected QP attributes are returned. - * @qp_attr_mask: A bit-mask used to specify which attributes of the QP - * are being modified. - */ -int ib_modify_qp(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask); - -/** - * ib_query_qp - Returns the attribute list and current values for the - * specified QP. - * @qp: The QP to query. - * @qp_attr: The attributes of the specified QP. - * @qp_attr_mask: A bit-mask used to select specific attributes to query. - * @qp_init_attr: Additional attributes of the selected QP. - * - * The qp_attr_mask may be used to limit the query to gathering only the - * selected attributes. - */ -int ib_query_qp(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr); - -/** - * ib_destroy_qp - Destroys the specified QP. - * @qp: The QP to destroy. - */ -int ib_destroy_qp(struct ib_qp *qp); - -/** - * ib_post_send - Posts a list of work requests to the send queue of - * the specified QP. - * @qp: The QP to post the work request on. - * @send_wr: A list of work requests to post on the send queue. - * @bad_send_wr: On an immediate failure, this parameter will reference - * the work request that failed to be posted on the QP. - */ -static inline int ib_post_send(struct ib_qp *qp, - struct ib_send_wr *send_wr, - struct ib_send_wr **bad_send_wr) -{ - return qp->device->post_send(qp, send_wr, bad_send_wr); -} - -/** - * ib_post_recv - Posts a list of work requests to the receive queue of - * the specified QP. - * @qp: The QP to post the work request on. - * @recv_wr: A list of work requests to post on the receive queue. - * @bad_recv_wr: On an immediate failure, this parameter will reference - * the work request that failed to be posted on the QP. - */ -static inline int ib_post_recv(struct ib_qp *qp, - struct ib_recv_wr *recv_wr, - struct ib_recv_wr **bad_recv_wr) -{ - return qp->device->post_recv(qp, recv_wr, bad_recv_wr); -} -
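As a usage note on the posting verbs being removed above: they take a linked list of work requests and hand the whole chain to the driver in one call, and on an immediate failure *bad_send_wr / *bad_recv_wr points at the first request that could not be posted. A minimal sketch, assuming the caller already owns a DMA-mapped buffer and the lkey of a registered MR (post_one_recv() is a hypothetical helper, not part of the API):

	static int post_one_recv(struct ib_qp *qp, u64 wr_id,
				 u64 dma_addr, u32 len, u32 lkey)
	{
		struct ib_sge sge = {
			.addr   = dma_addr,	/* DMA address of the receive buffer */
			.length = len,
			.lkey   = lkey,		/* from the buffer's registered MR */
		};
		struct ib_recv_wr wr = {
			.wr_id   = wr_id,	/* opaque cookie, echoed back in ib_wc.wr_id */
			.sg_list = &sge,
			.num_sge = 1,
		};
		struct ib_recv_wr *bad_wr;

		return ib_post_recv(qp, &wr, &bad_wr);
	}

Sends are symmetric: fill an ib_send_wr with an opcode such as IB_WR_SEND and flags such as IB_SEND_SIGNALED, then call ib_post_send().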
-/** - * ib_create_cq - Creates a CQ on the specified device. - * @device: The device on which to create the CQ. - * @comp_handler: A user-specified callback that is invoked when a - * completion event occurs on the CQ. - * @event_handler: A user-specified callback that is invoked when an - * asynchronous event not associated with a completion occurs on the CQ. - * @cq_context: Context associated with the CQ returned to the user via - * the associated completion and event handlers. - * @cqe: The minimum size of the CQ. - * - * Users can examine the cq structure to determine the actual CQ size. - */ -struct ib_cq *ib_create_cq(struct ib_device *device, - ib_comp_handler comp_handler, - void (*event_handler)(struct ib_event *, void *), - void *cq_context, int cqe); - -/** - * ib_resize_cq - Modifies the capacity of the CQ. - * @cq: The CQ to resize. - * @cqe: The minimum size of the CQ. - * - * Users can examine the cq structure to determine the actual CQ size. - */ -int ib_resize_cq(struct ib_cq *cq, int cqe); - -/** - * ib_destroy_cq - Destroys the specified CQ. - * @cq: The CQ to destroy. - */ -int ib_destroy_cq(struct ib_cq *cq); - -/** - * ib_poll_cq - poll a CQ for completion(s) - * @cq:the CQ being polled - * @num_entries:maximum number of completions to return - * @wc:array of at least @num_entries &struct ib_wc where completions - * will be returned - * - * Poll a CQ for (possibly multiple) completions. If the return value - * is < 0, an error occurred. If the return value is >= 0, it is the - * number of completions returned. If the return value is - * non-negative and < num_entries, then the CQ was emptied. - */ -static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, - struct ib_wc *wc) -{ - return cq->device->poll_cq(cq, num_entries, wc); -} - -/** - * ib_peek_cq - Returns the number of unreaped completions currently - * on the specified CQ. - * @cq: The CQ to peek. - * @wc_cnt: A minimum number of unreaped completions to check for. - * - * If the number of unreaped completions is greater than or equal to wc_cnt, - * this function returns wc_cnt, otherwise, it returns the actual number of - * unreaped completions. - */ -int ib_peek_cq(struct ib_cq *cq, int wc_cnt); - -/** - * ib_req_notify_cq - Request completion notification on a CQ. - * @cq: The CQ to generate an event for. - * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will - * occur on the next solicited event. If set to %IB_CQ_NEXT_COMP, - * notification will occur on the next completion. - */ -static inline int ib_req_notify_cq(struct ib_cq *cq, - enum ib_cq_notify cq_notify) -{ - return cq->device->req_notify_cq(cq, cq_notify); -} - -/** - * ib_req_ncomp_notif - Request completion notification when there are - * at least the specified number of unreaped completions on the CQ. - * @cq: The CQ to generate an event for. - * @wc_cnt: The number of unreaped completions that should be on the - * CQ before an event is generated. - */ -static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) -{ - return cq->device->req_ncomp_notif ? - cq->device->req_ncomp_notif(cq, wc_cnt) : - -ENOSYS; -} -
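The ib_req_notify_cq() and ib_poll_cq() comments above imply the usual event-driven pattern: rearm the CQ first, then drain it, so that a completion arriving during the drain still triggers a fresh callback. A sketch of a handler matching the ib_comp_handler typedef (hypothetical; IB_WC_SUCCESS comes from the ib_wc_status enum defined earlier in this header):

	static void example_comp_handler(struct ib_cq *cq, void *cq_context)
	{
		struct ib_wc wc;

		/* Rearm before polling so that completions which arrive
		 * while we drain still generate a new event. */
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

		while (ib_poll_cq(cq, 1, &wc) > 0) {
			if (wc.status != IB_WC_SUCCESS) {
				/* wc.wr_id identifies the failed work request */
				continue;
			}
			/* dispatch on wc.opcode: IB_WC_SEND, IB_WC_RECV, ... */
		}
	}

Such a handler would be registered via the comp_handler argument of ib_create_cq() above.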
-/** - * ib_get_dma_mr - Returns a memory region for system memory that is - * usable for DMA. - * @pd: The protection domain associated with the memory region. - * @mr_access_flags: Specifies the memory access rights. - */ -struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); - -/** - * ib_reg_phys_mr - Prepares a virtually addressed memory region for use - * by an HCA. - * @pd: The protection domain associated assigned to the registered region. - * @phys_buf_array: Specifies a list of physical buffers to use in the - * memory region. - * @num_phys_buf: Specifies the size of the phys_buf_array. - * @mr_access_flags: Specifies the memory access rights. - * @iova_start: The offset of the region's starting I/O virtual address. - */ -struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, - struct ib_phys_buf *phys_buf_array, - int num_phys_buf, - int mr_access_flags, - u64 *iova_start); - -/** - * ib_rereg_phys_mr - Modifies the attributes of an existing memory region. - * Conceptually, this call performs the functions deregister memory region - * followed by register physical memory region. Where possible, - * resources are reused instead of deallocated and reallocated. - * @mr: The memory region to modify. - * @mr_rereg_mask: A bit-mask used to indicate which of the following - * properties of the memory region are being modified. - * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies - * the new protection domain to associated with the memory region, - * otherwise, this parameter is ignored. - * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this - * field specifies a list of physical buffers to use in the new - * translation, otherwise, this parameter is ignored. - * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this - * field specifies the size of the phys_buf_array, otherwise, this - * parameter is ignored. - * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this - * field specifies the new memory access rights, otherwise, this - * parameter is ignored. - * @iova_start: The offset of the region's starting I/O virtual address. - */ -int ib_rereg_phys_mr(struct ib_mr *mr, - int mr_rereg_mask, - struct ib_pd *pd, - struct ib_phys_buf *phys_buf_array, - int num_phys_buf, - int mr_access_flags, - u64 *iova_start); - -/** - * ib_query_mr - Retrieves information about a specific memory region. - * @mr: The memory region to retrieve information about. - * @mr_attr: The attributes of the specified memory region. - */ -int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); - -/** - * ib_dereg_mr - Deregisters a memory region and removes it from the - * HCA translation table. - * @mr: The memory region to deregister. - */ -int ib_dereg_mr(struct ib_mr *mr); - -/** - * ib_alloc_mw - Allocates a memory window. - * @pd: The protection domain associated with the memory window. - */ -struct ib_mw *ib_alloc_mw(struct ib_pd *pd); - -/** - * ib_bind_mw - Posts a work request to the send queue of the specified - * QP, which binds the memory window to the given address range and - * remote access attributes. - * @qp: QP to post the bind work request on. - * @mw: The memory window to bind. - * @mw_bind: Specifies information about the memory window, including - * its address range, remote access rights, and associated memory region. - */ -static inline int ib_bind_mw(struct ib_qp *qp, - struct ib_mw *mw, - struct ib_mw_bind *mw_bind) -{ - /* XXX reference counting in corresponding MR? */ - return mw->device->bind_mw ? - mw->device->bind_mw(qp, mw, mw_bind) : - -ENOSYS; -} - -/** - * ib_dealloc_mw - Deallocates a memory window. - * @mw: The memory window to deallocate. - */ -int ib_dealloc_mw(struct ib_mw *mw); - -/** - * ib_alloc_fmr - Allocates a unmapped fast memory region. - * @pd: The protection domain associated with the unmapped region. - * @mr_access_flags: Specifies the memory access rights. - * @fmr_attr: Attributes of the unmapped region. - * - * A fast memory region must be mapped before it can be used as part of - * a work request. - */ -struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, - int mr_access_flags, - struct ib_fmr_attr *fmr_attr); - -/** - * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. - * @fmr: The fast memory region to associate with the pages. - * @page_list: An array of physical pages to map to the fast memory region. - * @list_len: The number of pages in page_list. - * @iova: The I/O virtual address to use with the mapped region. - */ -static inline int ib_map_phys_fmr(struct ib_fmr *fmr, - u64 *page_list, int list_len, - u64 iova) -{ - return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); -} - -/** - * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. - * @fmr_list: A linked list of fast memory regions to unmap. - */ -int ib_unmap_fmr(struct list_head *fmr_list); - -/** - * ib_dealloc_fmr - Deallocates a fast memory region. - * @fmr: The fast memory region to deallocate. - */ -int ib_dealloc_fmr(struct ib_fmr *fmr); -
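For the FMR calls above, the intended lifecycle is allocate once, then map and unmap many times; note that unmapping is deliberately batched through the list member of struct ib_fmr so its cost (which may involve flushing device translation caches) is amortized over many regions. A sketch with two flagged assumptions: that ib_alloc_fmr() reports errors via ERR_PTR() like other allocation verbs, and that page_size holds a power-of-two shift such as PAGE_SHIFT:

	static int example_fmr_usage(struct ib_pd *pd, u64 *page_list,
				     int npages, u64 iova)
	{
		struct ib_fmr_attr attr = {
			.max_pages = 64,
			.max_maps  = 32,
			.page_size = PAGE_SHIFT,	/* assumed: log2 of page size */
		};
		struct ib_fmr *fmr;
		LIST_HEAD(fmr_list);
		int ret;

		fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_READ, &attr);
		if (IS_ERR(fmr))		/* assumed error convention */
			return PTR_ERR(fmr);

		/* page_list[] holds the DMA addresses of the backing pages */
		ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
		if (!ret) {
			/* ... use fmr->lkey / fmr->rkey in work requests ... */
			list_add_tail(&fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);	/* batched unmap */
		}
		ib_dealloc_fmr(fmr);
		return ret;
	}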
-/** - * ib_attach_mcast - Attaches the specified QP to a multicast group. - * @qp: QP to attach to the multicast group. The QP must be type - * IB_QPT_UD. - * @gid: Multicast group GID. - * @lid: Multicast group LID in host byte order. - * - * In order to send and receive multicast packets, subnet - * administration must have created the multicast group and configured - * the fabric appropriately. The port associated with the specified - * QP must also be a member of the multicast group. - */ -int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); - -/** - * ib_detach_mcast - Detaches the specified QP from a multicast group. - * @qp: QP to detach from the multicast group. - * @gid: Multicast group GID. - * @lid: Multicast group LID in host byte order. - */ -int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); - -#endif /* IB_VERBS_H */
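That closes out the removed ib_verbs.h. As its ib_attach_mcast() documentation notes, the group must already exist (created through the SA MCMember join that ipoib_multicast.c below performs) and the LID argument is in host byte order, while the SA record stores the MLID big-endian; hence the be16_to_cpu() conversion at call sites. A sketch, assuming rec is a completed struct ib_sa_mcmember_rec from a successful join:

	static int example_attach(struct ib_qp *qp,
				  struct ib_sa_mcmember_rec *rec)
	{
		/* qp must be an IB_QPT_UD QP, on a port that has already
		 * been joined to the group via subnet administration. */
		return ib_attach_mcast(qp, &rec->mgid,
				       be16_to_cpu(rec->mlid));
	}

Teardown is symmetric via ib_detach_mcast() with the same arguments; ipoib_mcast_leave() below does exactly this through ipoib_mcast_detach().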
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile index 394bc08abc6f..8935e74ae3f8 100644 --- a/drivers/infiniband/ulp/ipoib/Makefile +++ b/drivers/infiniband/ulp/ipoib/Makefile @@ -1,5 +1,3 @@ -EXTRA_CFLAGS += -Idrivers/infiniband/include - obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o ib_ipoib-y := ipoib_main.o \ diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 04c98f54e9c4..bea960b8191f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -1,5 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -49,9 +51,9 @@ #include <asm/atomic.h> #include <asm/semaphore.h> -#include <ib_verbs.h> -#include <ib_pack.h> -#include <ib_sa.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_pack.h> +#include <rdma/ib_sa.h> /* constants */ @@ -88,8 +90,8 @@ enum { /* structs */ struct ipoib_header { - u16 proto; - u16 reserved; + __be16 proto; + u16 reserved; }; struct ipoib_pseudoheader { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index a84e5fe0f193..38b150f775e7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c @@ -97,7 +97,7 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr) for (n = 0, i = 0; i < sizeof mgid / 2; ++i) { n += sprintf(gid_buf + n, "%x", - be16_to_cpu(((u16 *)mgid.raw)[i])); + be16_to_cpu(((__be16 *) mgid.raw)[i])); if (i < sizeof mgid / 2 - 1) gid_buf[n++] = ':'; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index eee82363167d..ef0e3894863c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1,5 +1,8 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -35,7 +38,7 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> -#include <ib_cache.h> +#include <rdma/ib_cache.h> #include "ipoib.h" diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index fa00816a3cf7..0e8ac138e355 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1,5 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -34,7 +36,6 @@ #include "ipoib.h" -#include <linux/version.h> #include <linux/module.h> #include <linux/init.h> @@ -607,8 +608,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x " IPOIB_GID_FMT "\n", skb->dst ? "neigh" : "dst", - be16_to_cpup((u16 *) skb->data), - be32_to_cpup((u32 *) phdr->hwaddr), + be16_to_cpup((__be16 *) skb->data), + be32_to_cpup((__be32 *) phdr->hwaddr), IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4))); dev_kfree_skb_any(skb); ++priv->stats.tx_dropped; @@ -671,7 +672,7 @@ static void ipoib_set_mcast_list(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); - schedule_work(&priv->restart_task); + queue_work(ipoib_workqueue, &priv->restart_task); } static void ipoib_neigh_destructor(struct neighbour *n) @@ -780,15 +781,11 @@ void ipoib_dev_cleanup(struct net_device *dev) ipoib_ib_dev_cleanup(dev); - if (priv->rx_ring) { - kfree(priv->rx_ring); - priv->rx_ring = NULL; - } + kfree(priv->rx_ring); + kfree(priv->tx_ring); - if (priv->tx_ring) { - kfree(priv->tx_ring); - priv->tx_ring = NULL; - } + priv->rx_ring = NULL; + priv->tx_ring = NULL; } static void ipoib_setup(struct net_device *dev) @@ -886,6 +883,12 @@ static ssize_t create_child(struct class_device *cdev, if (pkey < 0 || pkey > 0xffff) return -EINVAL; + /* + * Set the full membership bit, so that we join the right + * broadcast group, etc. + */ + pkey |= 0x8000; + ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), pkey); @@ -938,6 +941,12 @@ static struct net_device *ipoib_add_port(const char *format, goto alloc_mem_failed; } + /* + * Set the full membership bit, so that we join the right + * broadcast group, etc. + */ + priv->pkey |= 0x8000; + priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff;
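The two pkey |= 0x8000 hunks above rest on an InfiniBand detail worth spelling out: bit 15 of a P_Key is the membership bit, set for full members and clear for limited members, and the broadcast group's MGID embeds the full-membership form of the P_Key. Joining with the bit clear would therefore land the interface in the wrong (or a nonexistent) broadcast group:

	u16 pkey = 0x0001;	/* limited-membership form of partition 1 */

	pkey |= 0x8000;		/* 0x8001: full-membership form of the
				 * same partition key */

The broadcast[8] and broadcast[9] assignments just above then publish this full-membership P_Key in the device's hardware address.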
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 70208c3d21e2..aca7aea18a69 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -1,5 +1,7 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -357,7 +359,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; - rec.pkey = be16_to_cpu(priv->pkey); + rec.pkey = cpu_to_be16(priv->pkey); ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | @@ -457,7 +459,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; - rec.pkey = be16_to_cpu(priv->pkey); + rec.pkey = cpu_to_be16(priv->pkey); comp_mask = IB_SA_MCMEMBER_REC_MGID | @@ -646,7 +648,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; - rec.pkey = be16_to_cpu(priv->pkey); + rec.pkey = cpu_to_be16(priv->pkey); /* Remove ourselves from the multicast group */ ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid), diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 4933edf062c2..79f59d0563ed 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -32,7 +33,7 @@ * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $ */ -#include <ib_cache.h> +#include <rdma/ib_cache.h> #include "ipoib.h" diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 94b8ea812fef..332d730e60c2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -32,7 +32,6 @@ * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $ */ -#include <linux/version.h> #include <linux/module.h> #include <linux/init.h>
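A closing note on the recurring rec.pkey change in ipoib_multicast.c above, and on the patch's wider u16 to __be16 conversions (the ipoib_header proto field, the CM identifiers, and so on): on any single architecture, be16_to_cpu() and cpu_to_be16() compile to the same byte swap, so these hunks are behavioral no-ops. What they fix is the declared direction of the conversion, which is exactly what the new __be16 typing lets sparse verify when endian checking is enabled. Schematically:

	__be16 wire;			/* big-endian, as it appears on the wire */
	u16    host;			/* CPU byte order */

	wire = cpu_to_be16(host);	/* host -> wire: type-checks */
	host = be16_to_cpu(wire);	/* wire -> host: type-checks */
	wire = host;			/* flagged by sparse endian checking */

With rec.pkey now typed __be16 and priv->pkey kept in host order, cpu_to_be16() is the conversion that type-checks; the old be16_to_cpu() spelling produced the same bytes but claimed the opposite direction.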