diff options
Diffstat (limited to 'drivers/infiniband/core/cma.c')
| -rw-r--r-- | drivers/infiniband/core/cma.c | 116 | 
1 files changed, 87 insertions, 29 deletions
| diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 94096511599f..2b9ffc21cbc4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -43,7 +43,6 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");  MODULE_LICENSE("Dual BSD/GPL");  #define CMA_CM_RESPONSE_TIMEOUT 20 -#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000  #define CMA_MAX_CM_RETRIES 15  #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)  #define CMA_IBOE_PACKET_LIFETIME 18 @@ -219,14 +218,6 @@ struct rdma_bind_list {  	unsigned short		port;  }; -struct class_port_info_context { -	struct ib_class_port_info	*class_port_info; -	struct ib_device		*device; -	struct completion		done; -	struct ib_sa_query		*sa_query; -	u8				port_num; -}; -  static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,  			struct rdma_bind_list *bind_list, int snum)  { @@ -287,7 +278,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,  }  int cma_get_default_gid_type(struct cma_device *cma_dev, -			     unsigned int port) +			     u32 port)  {  	if (!rdma_is_port_valid(cma_dev->device, port))  		return -EINVAL; @@ -296,7 +287,7 @@ int cma_get_default_gid_type(struct cma_device *cma_dev,  }  int cma_set_default_gid_type(struct cma_device *cma_dev, -			     unsigned int port, +			     u32 port,  			     enum ib_gid_type default_gid_type)  {  	unsigned long supported_gids; @@ -319,7 +310,7 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,  	return 0;  } -int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port) +int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)  {  	if (!rdma_is_port_valid(cma_dev->device, port))  		return -EINVAL; @@ -327,7 +318,7 @@ int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)  	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];  } -int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port, +int 
cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,  			     u8 default_roce_tos)  {  	if (!rdma_is_port_valid(cma_dev->device, port)) @@ -463,7 +454,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,  	id_priv->id.route.addr.dev_addr.transport =  		rdma_node_get_transport(cma_dev->device->node_type);  	list_add_tail(&id_priv->list, &cma_dev->id_list); -	rdma_restrack_add(&id_priv->res);  	trace_cm_id_attach(id_priv, cma_dev->device);  } @@ -562,7 +552,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a  }  static const struct ib_gid_attr * -cma_validate_port(struct ib_device *device, u8 port, +cma_validate_port(struct ib_device *device, u32 port,  		  enum ib_gid_type gid_type,  		  union ib_gid *gid,  		  struct rdma_id_private *id_priv) @@ -620,7 +610,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)  	struct cma_device *cma_dev;  	enum ib_gid_type gid_type;  	int ret = -ENODEV; -	unsigned int port; +	u32 port;  	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&  	    id_priv->id.ps == RDMA_PS_IPOIB) @@ -700,6 +690,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,  	mutex_lock(&lock);  	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);  	mutex_unlock(&lock); +	rdma_restrack_add(&id_priv->res);  	return 0;  } @@ -711,8 +702,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,  	struct cma_device *cma_dev;  	enum ib_gid_type gid_type;  	int ret = -ENODEV; -	unsigned int port;  	union ib_gid gid; +	u32 port;  	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&  	    id_priv->id.ps == RDMA_PS_IPOIB) @@ -754,8 +745,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,  	}  out: -	if (!ret) +	if (!ret) {  		cma_attach_to_dev(id_priv, cma_dev); +		rdma_restrack_add(&id_priv->res); +	}  	mutex_unlock(&lock);  	return ret; @@ -816,6 +809,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)  found:  	cma_attach_to_dev(id_priv, 
cma_dev); +	rdma_restrack_add(&id_priv->res);  	mutex_unlock(&lock);  	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);  	memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); @@ -852,6 +846,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,  	id_priv->id.qp_type = qp_type;  	id_priv->tos_set = false;  	id_priv->timeout_set = false; +	id_priv->min_rnr_timer_set = false;  	id_priv->gid_type = IB_GID_TYPE_IB;  	spin_lock_init(&id_priv->lock);  	mutex_init(&id_priv->qp_mutex); @@ -1135,12 +1130,16 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,  						 qp_attr_mask);  		qp_attr->port_num = id_priv->id.port_num;  		*qp_attr_mask |= IB_QP_PORT; -	} else +	} else {  		ret = -ENOSYS; +	}  	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)  		qp_attr->timeout = id_priv->timeout; +	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) +		qp_attr->min_rnr_timer = id_priv->min_rnr_timer; +  	return ret;  }  EXPORT_SYMBOL(rdma_init_qp_attr); @@ -1581,7 +1580,7 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,  static bool cma_protocol_roce(const struct rdma_cm_id *id)  {  	struct ib_device *device = id->device; -	const int port_num = id->port_num ?: rdma_start_port(device); +	const u32 port_num = id->port_num ?: rdma_start_port(device);  	return rdma_protocol_roce(device, port_num);  } @@ -2474,6 +2473,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)  	id->tos = id_priv->tos;  	id->tos_set = id_priv->tos_set; +	id->afonly = id_priv->afonly;  	id_priv->cm_id.iw = id;  	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), @@ -2529,6 +2529,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,  	       rdma_addr_size(cma_src_addr(id_priv)));  	_cma_attach_to_dev(dev_id_priv, cma_dev); +	rdma_restrack_add(&dev_id_priv->res);  	cma_id_get(id_priv);  	dev_id_priv->internal_id = 1;  	dev_id_priv->afonly = id_priv->afonly; @@ -2615,6 
+2616,43 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)  }  EXPORT_SYMBOL(rdma_set_ack_timeout); +/** + * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the + *			      QP associated with a connection identifier. + * @id: Communication identifier associated with the service type. + * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK + *		   Timer Field" in the IBTA specification. + * + * This function should be called before rdma_connect() on active + * side, and on passive side before rdma_accept(). The timer value + * will be associated with the local QP. When it receives a send it is + * not ready to handle, typically if the receive queue is empty, an RNR + * Retry NAK is returned to the requester with the min_rnr_timer + * encoded. The requester will then wait at least the time specified + * in the NAK before retrying. The default is zero, which translates + * to a minimum RNR Timer value of 655 ms. + * + * Return: 0 for success + */ +int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) +{ +	struct rdma_id_private *id_priv; + +	/* It is a five-bit value */ +	if (min_rnr_timer & 0xe0) +		return -EINVAL; + +	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) +		return -EINVAL; + +	id_priv = container_of(id, struct rdma_id_private, id); +	id_priv->min_rnr_timer = min_rnr_timer; +	id_priv->min_rnr_timer_set = true; + +	return 0; +} +EXPORT_SYMBOL(rdma_set_min_rnr_timer); +  static void cma_query_handler(int status, struct sa_path_rec *path_rec,  			      void *context)  { @@ -3169,6 +3207,7 @@ port_found:  	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);  	id_priv->id.port_num = p;  	cma_attach_to_dev(id_priv, cma_dev); +	rdma_restrack_add(&id_priv->res);  	cma_set_loopback(cma_src_addr(id_priv));  out:  	mutex_unlock(&lock); @@ -3201,6 +3240,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,  		if (status)  			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: 
failed to acquire device. status %d\n",  					     status); +		rdma_restrack_add(&id_priv->res);  	} else if (status) {  		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);  	} @@ -3812,6 +3852,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)  	if (ret)  		goto err2; +	if (!cma_any_addr(addr)) +		rdma_restrack_add(&id_priv->res);  	return 0;  err2:  	if (id_priv->cma_dev) @@ -4124,10 +4166,11 @@ int rdma_connect_locked(struct rdma_cm_id *id,  			ret = cma_resolve_ib_udp(id_priv, conn_param);  		else  			ret = cma_connect_ib(id_priv, conn_param); -	} else if (rdma_cap_iw_cm(id->device, id->port_num)) +	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {  		ret = cma_connect_iw(id_priv, conn_param); -	else +	} else {  		ret = -ENOSYS; +	}  	if (ret)  		goto err_state;  	return 0; @@ -4234,9 +4277,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,  	iw_param.ird = conn_param->responder_resources;  	iw_param.private_data = conn_param->private_data;  	iw_param.private_data_len = conn_param->private_data_len; -	if (id_priv->id.qp) { +	if (id_priv->id.qp)  		iw_param.qpn = id_priv->qp_num; -	} else +	else  		iw_param.qpn = conn_param->qp_num;  	return iw_cm_accept(id_priv->cm_id.iw, &iw_param); @@ -4319,11 +4362,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)  			else  				ret = cma_rep_recv(id_priv);  		} -	} else if (rdma_cap_iw_cm(id->device, id->port_num)) +	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {  		ret = cma_accept_iw(id_priv, conn_param); -	else +	} else {  		ret = -ENOSYS; - +	}  	if (ret)  		goto reject; @@ -4409,8 +4452,9 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,  	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {  		ret = iw_cm_reject(id_priv->cm_id.iw,  				   private_data, private_data_len); -	} else +	} else {  		ret = -ENOSYS; +	}  	return ret;  } @@ -4864,14 +4908,28 @@ static void 
cma_process_remove(struct cma_device *cma_dev)  	wait_for_completion(&cma_dev->comp);  } +static bool cma_supported(struct ib_device *device) +{ +	u32 i; + +	rdma_for_each_port(device, i) { +		if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) +			return true; +	} +	return false; +} +  static int cma_add_one(struct ib_device *device)  {  	struct rdma_id_private *to_destroy;  	struct cma_device *cma_dev;  	struct rdma_id_private *id_priv; -	unsigned int i;  	unsigned long supported_gids = 0;  	int ret; +	u32 i; + +	if (!cma_supported(device)) +		return -EOPNOTSUPP;  	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);  	if (!cma_dev) | 
