author		Hal Rosenstock <halr@voltaire.com>	2006-03-29 04:40:04 +0400
committer	Roland Dreier <rolandd@cisco.com>	2006-03-30 19:19:51 +0400
commit		618a3c03fcfdf1ac4543247c8ddfb0c9d775ff33 (patch)
tree		9b3c0baf53f0fcab01848b7816aace785afd8a94
parent		fa9656bbd9af5b95adc43eaa0a143992346378cb (diff)
download	linux-618a3c03fcfdf1ac4543247c8ddfb0c9d775ff33.tar.xz
IB/mad: RMPP support for additional classes
Add RMPP support for additional management classes that support it.
Also, validate that RMPP use is consistent with the management class specified.
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--  drivers/infiniband/core/mad.c      | 54
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 19
-rw-r--r--  drivers/infiniband/core/user_mad.c | 30
-rw-r--r--  include/rdma/ib_mad.h              | 27
4 files changed, 85 insertions(+), 45 deletions(-)
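
The heart of the patch is the pair of newly exported helpers, ib_get_mad_data_offset() and ib_is_mad_class_rmpp(), which replace the per-file data_offset() copies and the open-coded class tests. As a minimal sketch of how a MAD consumer might use them together when allocating a send buffer — alloc_mad_for_class() below is a hypothetical helper, not part of the patch, and it assumes the agent was registered with an RMPP version exactly when its class supports RMPP, which this patch now enforces:

/* Hypothetical consumer-side sketch (not from this patch): size the
 * header with ib_get_mad_data_offset() and enable RMPP only for
 * classes that ib_is_mad_class_rmpp() reports as RMPP-capable.
 */
#include <rdma/ib_mad.h>

static struct ib_mad_send_buf *alloc_mad_for_class(struct ib_mad_agent *agent,
						   u32 remote_qpn,
						   u16 pkey_index,
						   u8 mgmt_class)
{
	int hdr_len = ib_get_mad_data_offset(mgmt_class);
	int rmpp_active = ib_is_mad_class_rmpp(mgmt_class);

	/* One segment's worth of data; an RMPP send could carry more. */
	return ib_create_send_mad(agent, remote_qpn, pkey_index,
				  rmpp_active, hdr_len,
				  sizeof(struct ib_mad) - hdr_len,
				  GFP_KERNEL);
}

Centralizing both decisions in one place is what lets user_mad.c in the diff below collapse its three-way class test into a single pair of calls.
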
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index d4d07012a5ca..ba54c856b0e5 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -227,6 +227,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		if (!is_vendor_oui(mad_reg_req->oui))
 			goto error1;
 	}
+	/* Make sure class supplied is consistent with RMPP */
+	if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
+		if (!rmpp_version)
+			goto error1;
+	} else {
+		if (rmpp_version)
+			goto error1;
+	}
 	/* Make sure class supplied is consistent with QP type */
 	if (qp_type == IB_QPT_SMI) {
 		if ((mad_reg_req->mgmt_class !=
@@ -890,6 +898,35 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_create_send_mad);
 
+int ib_get_mad_data_offset(u8 mgmt_class)
+{
+	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		return IB_MGMT_SA_HDR;
+	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+		 (mgmt_class == IB_MGMT_CLASS_BIS))
+		return IB_MGMT_DEVICE_HDR;
+	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return IB_MGMT_VENDOR_HDR;
+	else
+		return IB_MGMT_MAD_HDR;
+}
+EXPORT_SYMBOL(ib_get_mad_data_offset);
+
+int ib_is_mad_class_rmpp(u8 mgmt_class)
+{
+	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
+	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
+	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(ib_is_mad_class_rmpp);
+
 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
@@ -1022,6 +1059,13 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 			goto error;
 		}
 
+		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
+			if (mad_agent_priv->agent.rmpp_version) {
+				ret = -EINVAL;
+				goto error;
+			}
+		}
+
 		/*
 		 * Save pointer to next work request to post in case the
 		 * current one completes, and the user modifies the work
@@ -2454,11 +2498,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 			}
 		}
 		sg_list.addr = dma_map_single(qp_info->port_priv->
-					      device->dma_device,
-					      &mad_priv->grh,
-					      sizeof *mad_priv -
-					      sizeof mad_priv->header,
-					      DMA_FROM_DEVICE);
+						device->dma_device,
+						&mad_priv->grh,
+						sizeof *mad_priv -
+						sizeof mad_priv->header,
+						DMA_FROM_DEVICE);
 		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 		mad_priv->header.mad_list.mad_queue = recv_queue;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index a6405079c285..dfd4e588ce03 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005 Intel Inc. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 	}
 }
 
-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return IB_MGMT_SA_HDR;
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return IB_MGMT_VENDOR_HDR;
-	else
-		return IB_MGMT_RMPP_HDR;
-}
-
 static void format_ack(struct ib_mad_send_buf *msg,
 		       struct ib_rmpp_mad *data,
 		       struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 	struct ib_mad_send_buf *msg;
 	int ret, hdr_len;
 
-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1, hdr_len,
 				 0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
 	if (IS_ERR(ah))
 		return (void *) ah;
 
-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1,
 				 hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 
 	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
 
-	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
 	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
 	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index fb6cd42601f9..afe70a549c2f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -177,17 +177,6 @@ static int queue_packet(struct ib_umad_file *file,
 	return ret;
 }
 
-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return IB_MGMT_SA_HDR;
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return IB_MGMT_VENDOR_HDR;
-	else
-		return IB_MGMT_RMPP_HDR;
-}
-
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
@@ -283,7 +272,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
 		 */
 		return -ENOSPC;
 	}
-	offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+	offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
 	max_seg_payload = sizeof (struct ib_mad) - offset;
 
 	for (left = packet->length - seg_payload, buf += seg_payload;
@@ -441,21 +430,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	}
 
 	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
-		hdr_len = IB_MGMT_SA_HDR;
-		copy_offset = IB_MGMT_RMPP_HDR;
-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-			      IB_MGMT_RMPP_FLAG_ACTIVE;
-	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
-		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
-		hdr_len = IB_MGMT_VENDOR_HDR;
+	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
+		copy_offset = IB_MGMT_MAD_HDR;
+		rmpp_active = 0;
+	} else {
 		copy_offset = IB_MGMT_RMPP_HDR;
 		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 			      IB_MGMT_RMPP_FLAG_ACTIVE;
-	} else {
-		hdr_len = IB_MGMT_MAD_HDR;
-		copy_offset = IB_MGMT_MAD_HDR;
-		rmpp_active = 0;
 	}
 
 	data_len = count - sizeof (struct ib_user_mad) - hdr_len;
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 51ab8eddb295..5ff77558013b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
  * Copyright (c) 2004 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -55,6 +55,10 @@
 #define IB_MGMT_CLASS_DEVICE_MGMT		0x06
 #define IB_MGMT_CLASS_CM			0x07
 #define IB_MGMT_CLASS_SNMP			0x08
+#define IB_MGMT_CLASS_DEVICE_ADM		0x10
+#define IB_MGMT_CLASS_BOOT_MGMT			0x11
+#define IB_MGMT_CLASS_BIS			0x12
+#define IB_MGMT_CLASS_CONG_MGMT			0x21
 #define IB_MGMT_CLASS_VENDOR_RANGE2_START	0x30
 #define IB_MGMT_CLASS_VENDOR_RANGE2_END		0x4F
 
@@ -117,6 +121,8 @@ enum {
 	IB_MGMT_VENDOR_DATA = 216,
 	IB_MGMT_SA_HDR = 56,
 	IB_MGMT_SA_DATA = 200,
+	IB_MGMT_DEVICE_HDR = 64,
+	IB_MGMT_DEVICE_DATA = 192,
 };
 
 struct ib_mad_hdr {
@@ -603,6 +609,25 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 					    gfp_t gfp_mask);
 
 /**
+ * ib_is_mad_class_rmpp - returns whether given management class
+ * supports RMPP.
+ * @mgmt_class: management class
+ *
+ * This routine returns whether the management class supports RMPP.
+ */
+int ib_is_mad_class_rmpp(u8 mgmt_class);
+
+/**
+ * ib_get_mad_data_offset - returns the data offset for a given
+ * management class.
+ * @mgmt_class: management class
+ *
+ * This routine returns the data offset in the MAD for the management
+ * class requested.
+ */
+int ib_get_mad_data_offset(u8 mgmt_class);
+
+/**
  * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
  * @send_buf: Previously allocated send data buffer.
  * @seg_num: number of segment to return
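
On the registration side, the new consistency check in ib_register_mad_agent() means callers must now pass a nonzero rmpp_version for the classes this patch recognizes as RMPP-capable, and zero for all others; previously only the version number itself was validated. A hedged sketch of registering a Device Management agent under the new rule — register_dm_agent(), dm_send_handler, and dm_recv_handler are hypothetical names, not part of the patch:

/* Hypothetical registration sketch (not from this patch): Device
 * Management is now an RMPP class, so rmpp_version must be nonzero
 * or ib_register_mad_agent() fails with -EINVAL.
 */
#include <linux/bitops.h>
#include <rdma/ib_mad.h>

static struct ib_mad_agent *register_dm_agent(struct ib_device *device,
					      u8 port_num,
					      ib_mad_send_handler dm_send_handler,
					      ib_mad_recv_handler dm_recv_handler)
{
	struct ib_mad_reg_req reg_req = {
		.mgmt_class	    = IB_MGMT_CLASS_DEVICE_MGMT,
		.mgmt_class_version = 1,
	};

	/* Receive unsolicited Gets and Sets aimed at this class. */
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     &reg_req, IB_MGMT_RMPP_VERSION,
				     dm_send_handler, dm_recv_handler,
				     NULL);
}

Conversely, an agent for a class the patch leaves non-RMPP (for example IB_MGMT_CLASS_PERF_MGMT) must register with rmpp_version = 0, or registration now fails the same way.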