Diffstat (limited to 'drivers/infiniband')
80 files changed, 2247 insertions, 1272 deletions
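Before the raw diff: a minimal user-space sketch of the extensible-response pattern introduced by the ib_uverbs_ex_query_device() hunk in uverbs_cmd.c below, where response_length starts at offsetof() of the optional trailer and only grows if the caller's buffer can hold it. The struct and field names here are illustrative stand-ins, not the real uAPI layout.

/*
 * Fill only as much of the response as the caller's buffer (outlen) can
 * hold, and report how much was written in response_length -- the same
 * scheme the new extended query_device command uses for odp_caps.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ex_resp {
	uint32_t comp_mask;
	uint32_t response_length;	/* bytes of this struct actually filled */
	uint64_t base_caps;		/* stands in for the base query_device fields */
	uint64_t odp_caps;		/* optional trailer, present only if it fits */
};

static int ex_query(struct ex_resp *resp, size_t outlen)
{
	memset(resp, 0, sizeof(*resp));
	resp->response_length = offsetof(struct ex_resp, odp_caps);

	if (outlen < resp->response_length)
		return -1;		/* buffer too small even for the base part */

	resp->base_caps = 0xabcd;	/* placeholder base attributes */

	if (outlen >= resp->response_length + sizeof(resp->odp_caps)) {
		resp->odp_caps = 0x7;	/* placeholder per-transport ODP bits */
		resp->response_length += sizeof(resp->odp_caps);
	}
	return 0;
}

int main(void)
{
	struct ex_resp resp;

	/* Old-style caller: only knows about the base part of the response. */
	if (ex_query(&resp, offsetof(struct ex_resp, odp_caps)) == 0)
		printf("old caller got %u bytes\n", (unsigned)resp.response_length);

	/* New-style caller: passes the full struct and also receives odp_caps. */
	if (ex_query(&resp, sizeof(resp)) == 0)
		printf("new caller got %u bytes, odp_caps=0x%llx\n",
		       (unsigned)resp.response_length,
		       (unsigned long long)resp.odp_caps);
	return 0;
}

An older binary compiled against the shorter response keeps working unchanged, which is why the kernel never writes past response_length.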
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 56a4b7ca7ee3..45d67e9228d7 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx, if (!optlen) return -EINVAL; + memset(&sa_path, 0, sizeof(sa_path)); + sa_path.vlan_id = 0xffff; + ib_sa_unpack_path(path_data->path_rec, &sa_path); ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); if (ret) diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 6095872549e7..8b8cc6fa0ab0 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) if (likely(ib_umem_start(umem) != ib_umem_end(umem))) rbt_ib_umem_insert(&umem->odp_data->interval_tree, &context->umem_tree); - if (likely(!atomic_read(&context->notifier_count))) + if (likely(!atomic_read(&context->notifier_count)) || + context->odp_mrs_count == 1) umem->odp_data->mn_counters_active = true; else list_add(&umem->odp_data->no_private_counters, diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 643c08a025a5..b716b0815644 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd); IB_UVERBS_DECLARE_EX_CMD(create_flow); IB_UVERBS_DECLARE_EX_CMD(destroy_flow); +IB_UVERBS_DECLARE_EX_CMD(query_device); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b7943ff16ed3..a9f048990dfc 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -400,6 +400,52 @@ err: return ret; } +static void copy_query_dev_fields(struct ib_uverbs_file *file, + struct ib_uverbs_query_device_resp *resp, + struct ib_device_attr *attr) +{ + resp->fw_ver = attr->fw_ver; + resp->node_guid = file->device->ib_dev->node_guid; + resp->sys_image_guid = attr->sys_image_guid; + resp->max_mr_size = attr->max_mr_size; + resp->page_size_cap = attr->page_size_cap; + resp->vendor_id = attr->vendor_id; + resp->vendor_part_id = attr->vendor_part_id; + resp->hw_ver = attr->hw_ver; + resp->max_qp = attr->max_qp; + resp->max_qp_wr = attr->max_qp_wr; + resp->device_cap_flags = attr->device_cap_flags; + resp->max_sge = attr->max_sge; + resp->max_sge_rd = attr->max_sge_rd; + resp->max_cq = attr->max_cq; + resp->max_cqe = attr->max_cqe; + resp->max_mr = attr->max_mr; + resp->max_pd = attr->max_pd; + resp->max_qp_rd_atom = attr->max_qp_rd_atom; + resp->max_ee_rd_atom = attr->max_ee_rd_atom; + resp->max_res_rd_atom = attr->max_res_rd_atom; + resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; + resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; + resp->atomic_cap = attr->atomic_cap; + resp->max_ee = attr->max_ee; + resp->max_rdd = attr->max_rdd; + resp->max_mw = attr->max_mw; + resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; + resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; + resp->max_mcast_grp = attr->max_mcast_grp; + resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; + resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; + resp->max_ah = attr->max_ah; + resp->max_fmr = attr->max_fmr; + resp->max_map_per_fmr = attr->max_map_per_fmr; + resp->max_srq = attr->max_srq; + resp->max_srq_wr = attr->max_srq_wr; + resp->max_srq_sge = attr->max_srq_sge; + resp->max_pkeys = attr->max_pkeys; + resp->local_ca_ack_delay = 
attr->local_ca_ack_delay; + resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt; +} + ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) @@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, return ret; memset(&resp, 0, sizeof resp); - - resp.fw_ver = attr.fw_ver; - resp.node_guid = file->device->ib_dev->node_guid; - resp.sys_image_guid = attr.sys_image_guid; - resp.max_mr_size = attr.max_mr_size; - resp.page_size_cap = attr.page_size_cap; - resp.vendor_id = attr.vendor_id; - resp.vendor_part_id = attr.vendor_part_id; - resp.hw_ver = attr.hw_ver; - resp.max_qp = attr.max_qp; - resp.max_qp_wr = attr.max_qp_wr; - resp.device_cap_flags = attr.device_cap_flags; - resp.max_sge = attr.max_sge; - resp.max_sge_rd = attr.max_sge_rd; - resp.max_cq = attr.max_cq; - resp.max_cqe = attr.max_cqe; - resp.max_mr = attr.max_mr; - resp.max_pd = attr.max_pd; - resp.max_qp_rd_atom = attr.max_qp_rd_atom; - resp.max_ee_rd_atom = attr.max_ee_rd_atom; - resp.max_res_rd_atom = attr.max_res_rd_atom; - resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; - resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; - resp.atomic_cap = attr.atomic_cap; - resp.max_ee = attr.max_ee; - resp.max_rdd = attr.max_rdd; - resp.max_mw = attr.max_mw; - resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; - resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; - resp.max_mcast_grp = attr.max_mcast_grp; - resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; - resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; - resp.max_ah = attr.max_ah; - resp.max_fmr = attr.max_fmr; - resp.max_map_per_fmr = attr.max_map_per_fmr; - resp.max_srq = attr.max_srq; - resp.max_srq_wr = attr.max_srq_wr; - resp.max_srq_sge = attr.max_srq_sge; - resp.max_pkeys = attr.max_pkeys; - resp.local_ca_ack_delay = attr.local_ca_ack_delay; - resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; + copy_query_dev_fields(file, &resp, &attr); if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) @@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, if (qp->real_qp == qp) { ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); if (ret) - goto out; + goto release_qp; ret = qp->device->modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); } else { ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); } - put_qp_read(qp); - if (ret) - goto out; + goto release_qp; ret = in_len; +release_qp: + put_qp_read(qp); + out: kfree(attr); @@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, return ret ? 
ret : in_len; } + +int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) +{ + struct ib_uverbs_ex_query_device_resp resp; + struct ib_uverbs_ex_query_device cmd; + struct ib_device_attr attr; + struct ib_device *device; + int err; + + device = file->device->ib_dev; + if (ucore->inlen < sizeof(cmd)) + return -EINVAL; + + err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + if (err) + return err; + + if (cmd.comp_mask) + return -EINVAL; + + if (cmd.reserved) + return -EINVAL; + + resp.response_length = offsetof(typeof(resp), odp_caps); + + if (ucore->outlen < resp.response_length) + return -ENOSPC; + + err = device->query_device(device, &attr); + if (err) + return err; + + copy_query_dev_fields(file, &resp.base, &attr); + resp.comp_mask = 0; + + if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) + goto end; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + resp.odp_caps.general_caps = attr.odp_caps.general_caps; + resp.odp_caps.per_transport_caps.rc_odp_caps = + attr.odp_caps.per_transport_caps.rc_odp_caps; + resp.odp_caps.per_transport_caps.uc_odp_caps = + attr.odp_caps.per_transport_caps.uc_odp_caps; + resp.odp_caps.per_transport_caps.ud_odp_caps = + attr.odp_caps.per_transport_caps.ud_odp_caps; + resp.odp_caps.reserved = 0; +#else + memset(&resp.odp_caps, 0, sizeof(resp.odp_caps)); +#endif + resp.response_length += sizeof(resp.odp_caps); + +end: + err = ib_copy_to_udata(ucore, &resp, resp.response_length); + if (err) + return err; + + return 0; +} diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 5db1a8cc388d..259dcc7779f5 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, struct ib_udata *uhw) = { [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, + [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, }; static void ib_uverbs_add_one(struct ib_device *device); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 9edc200b311d..57176ddd4c50 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) static void set_emss(struct c4iw_ep *ep, u16 opt) { - ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - + ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] - ((AF_INET == ep->com.remote_addr.ss_family) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - sizeof(struct tcphdr); ep->mss = ep->emss; - if (GET_TCPOPT_TSTAMP(opt)) + if (TCPOPT_TSTAMP_G(opt)) ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4); if (ep->emss < 128) ep->emss = 128; if (ep->emss & 7) PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n", - GET_TCPOPT_MSS(opt), ep->mss, ep->emss); - PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), + TCPOPT_MSS_G(opt), ep->mss, ep->emss); + PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt), ep->mss, ep->emss); } @@ -652,29 +652,29 @@ static int send_connect(struct c4iw_ep *ep) if (win > RCV_BUFSIZ_M) win = RCV_BUFSIZ_M; - opt0 = (nocong ? NO_CONG(1) : 0) | + opt0 = (nocong ? 
NO_CONG_F : 0) | KEEP_ALIVE_F | - DELACK(1) | + DELACK_F | WND_SCALE_V(wscale) | MSS_IDX_V(mtu_idx) | L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | - DSCP(ep->tos) | + DSCP_V(ep->tos) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win); opt2 = RX_CHANNEL_V(0) | - CCTRL_ECN(enable_ecn) | + CCTRL_ECN_V(enable_ecn) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); if (enable_tcp_timestamps) - opt2 |= TSTAMPS_EN(1); + opt2 |= TSTAMPS_EN_F; if (enable_tcp_sack) - opt2 |= SACK_EN(1); + opt2 |= SACK_EN_F; if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN_F; if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { opt2 |= T5_OPT_2_VALID_F; - opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); + opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ } t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); @@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) struct c4iw_ep *ep; struct cpl_act_establish *req = cplhdr(skb); unsigned int tid = GET_TID(req); - unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); + unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_atid(t, atid); @@ -1258,8 +1258,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F | - F_RX_DACK_CHANGE | - V_RX_DACK_MODE(dack_mode)); + RX_DACK_CHANGE_F | + RX_DACK_MODE_V(dack_mode)); set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); c4iw_ofld_send(&ep->com.dev->rdev, skb); return credits; @@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); - req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); + req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.filter = cpu_to_be32(cxgb4_select_ntuple( ep->com.dev->rdev.lldi.ports[0], @@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) if (win > RCV_BUFSIZ_M) win = RCV_BUFSIZ_M; - req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) | - (nocong ? NO_CONG(1) : 0) | + req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F | + (nocong ? 
NO_CONG_F : 0) | KEEP_ALIVE_F | - DELACK(1) | + DELACK_F | WND_SCALE_V(wscale) | MSS_IDX_V(mtu_idx) | L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | - DSCP(ep->tos) | + DSCP_V(ep->tos) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win)); - req->tcb.opt2 = (__force __be32) (PACE(1) | - TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | + req->tcb.opt2 = (__force __be32) (PACE_V(1) | + TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | RX_CHANNEL_V(0) | - CCTRL_ECN(enable_ecn) | + CCTRL_ECN_V(enable_ecn) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); if (enable_tcp_timestamps) - req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1); + req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F; if (enable_tcp_sack) - req->tcb.opt2 |= (__force __be32)SACK_EN(1); + req->tcb.opt2 |= (__force __be32)SACK_EN_F; if (wscale && enable_tcp_window_scaling) req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F; req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0); @@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_act_open_rpl *rpl = cplhdr(skb); - unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( - ntohl(rpl->atid_status))); + unsigned int atid = TID_TID_G(AOPEN_ATID_G( + ntohl(rpl->atid_status))); struct tid_info *t = dev->rdev.lldi.tids; - int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); + int status = AOPEN_STATUS_G(ntohl(rpl->atid_status)); struct sockaddr_in *la; struct sockaddr_in *ra; struct sockaddr_in6 *la6; @@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) if (ep->com.local_addr.ss_family == AF_INET && dev->rdev.lldi.enable_fw_ofld_conn) { send_fw_act_open_req(ep, - GET_TID_TID(GET_AOPEN_ATID( + TID_TID_G(AOPEN_ATID_G( ntohl(rpl->atid_status)))); return 0; } @@ -2181,39 +2181,39 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, win = ep->rcv_win >> 10; if (win > RCV_BUFSIZ_M) win = RCV_BUFSIZ_M; - opt0 = (nocong ? NO_CONG(1) : 0) | + opt0 = (nocong ? 
NO_CONG_F : 0) | KEEP_ALIVE_F | - DELACK(1) | + DELACK_F | WND_SCALE_V(wscale) | MSS_IDX_V(mtu_idx) | L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | - DSCP(ep->tos >> 2) | + DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win); opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); if (enable_tcp_timestamps && req->tcpopt.tstamp) - opt2 |= TSTAMPS_EN(1); + opt2 |= TSTAMPS_EN_F; if (enable_tcp_sack && req->tcpopt.sack) - opt2 |= SACK_EN(1); + opt2 |= SACK_EN_F; if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN_F; if (enable_ecn) { const struct tcphdr *tcph; u32 hlen = ntohl(req->hdr_len); - tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + - G_IP_HDR_LEN(hlen); + tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + + IP_HDR_LEN_G(hlen); if (tcph->ece && tcph->cwr) - opt2 |= CCTRL_ECN(1); + opt2 |= CCTRL_ECN_V(1); } if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { u32 isn = (prandom_u32() & ~7UL) - 1; opt2 |= T5_OPT_2_VALID_F; - opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); + opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ rpl5 = (void *)rpl; memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); @@ -2245,8 +2245,8 @@ static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype, __u8 *local_ip, __u8 *peer_ip, __be16 *local_port, __be16 *peer_port) { - int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); - int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); + int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)); + int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)); struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); struct tcphdr *tcp = (struct tcphdr *) @@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *child_ep = NULL, *parent_ep; struct cpl_pass_accept_req *req = cplhdr(skb); - unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); + unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); struct tid_info *t = dev->rdev.lldi.tids; unsigned int hwtid = GET_TID(req); struct dst_entry *dst; @@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) ntohs(peer_port), peer_mss); dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, local_port, peer_port, - GET_POPEN_TOS(ntohl(req->tos_stid))); + PASS_OPEN_TOS_G(ntohl(req->tos_stid))); } else { PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" , __func__, parent_ep, hwtid, local_ip, peer_ip, ntohs(local_port), ntohs(peer_port), peer_mss); dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port, - PASS_OPEN_TOS(ntohl(req->tos_stid)), + PASS_OPEN_TOS_G(ntohl(req->tos_stid)), ((struct sockaddr_in6 *) &parent_ep->com.local_addr)->sin6_scope_id); } @@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) } c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; - child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); + child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); child_ep->dst = dst; child_ep->hwtid = hwtid; @@ -3500,24 +3500,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); - req->l2info = cpu_to_be16(V_SYN_INTF(intf) | - V_SYN_MAC_IDX(G_RX_MACIDX( + req->l2info = cpu_to_be16(SYN_INTF_V(intf) | + 
SYN_MAC_IDX_V(RX_MACIDX_G( (__force int) htonl(l2info))) | - F_SYN_XACT_MATCH); + SYN_XACT_MATCH_F); eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? - G_RX_ETHHDR_LEN((__force int) htonl(l2info)) : - G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info)); - req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( + RX_ETHHDR_LEN_G((__force int)htonl(l2info)) : + RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info)); + req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G( (__force int) htonl(l2info))) | - V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( + TCP_HDR_LEN_V(RX_TCPHDR_LEN_G( (__force int) htons(hdr_len))) | - V_IP_HDR_LEN(G_RX_IPHDR_LEN( + IP_HDR_LEN_V(RX_IPHDR_LEN_G( (__force int) htons(hdr_len))) | - V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len))); + ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len))); req->vlan = (__force __be16) vlantag; req->len = (__force __be16) len; - req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | - PASS_OPEN_TOS(tos)); + req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) | + PASS_OPEN_TOS_V(tos)); req->tcpopt.mss = htons(tmp_opt.mss_clamp); if (tmp_opt.wscale_ok) req->tcpopt.wsf = tmp_opt.snd_wscale; @@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); memset(req, 0, sizeof(*req)); - req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); + req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F); req->le.filter = (__force __be32) filter; @@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) | FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) | FW_OFLD_CONNECTION_WR_ASTID_V( - GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); + PASS_OPEN_TID_G(ntohl(cpl->tos_stid)))); /* * We store the qid in opt2 which will be used by the firmware @@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) struct neighbour *neigh; /* Drop all non-SYN packets */ - if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) + if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F))) goto reject; /* @@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) } eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 
- G_RX_ETHHDR_LEN(htonl(cpl->l2info)) : - G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info)); + RX_ETHHDR_LEN_G(htonl(cpl->l2info)) : + RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info)); if (eth_hdr_len == ETH_HLEN) { eh = (struct ethhdr *)(req + 1); iph = (struct iphdr *)(eh + 1); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index e9fd3a029296..ab7692ac2044 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -52,7 +52,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | - V_FW_RI_RES_WR_NRES(1) | + FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (unsigned long) &wr_wait; @@ -122,7 +122,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | - V_FW_RI_RES_WR_NRES(1) | + FW_RI_RES_WR_NRES_V(1) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (unsigned long) &wr_wait; @@ -131,17 +131,17 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, res->u.cq.op = FW_RI_RES_OP_WRITE; res->u.cq.iqid = cpu_to_be32(cq->cqid); res->u.cq.iqandst_to_iqandstindex = cpu_to_be32( - V_FW_RI_RES_WR_IQANUS(0) | - V_FW_RI_RES_WR_IQANUD(1) | - F_FW_RI_RES_WR_IQANDST | - V_FW_RI_RES_WR_IQANDSTINDEX( + FW_RI_RES_WR_IQANUS_V(0) | + FW_RI_RES_WR_IQANUD_V(1) | + FW_RI_RES_WR_IQANDST_F | + FW_RI_RES_WR_IQANDSTINDEX_V( rdev->lldi.ciq_ids[cq->vector])); res->u.cq.iqdroprss_to_iqesize = cpu_to_be16( - F_FW_RI_RES_WR_IQDROPRSS | - V_FW_RI_RES_WR_IQPCIECH(2) | - V_FW_RI_RES_WR_IQINTCNTTHRESH(0) | - F_FW_RI_RES_WR_IQO | - V_FW_RI_RES_WR_IQESIZE(1)); + FW_RI_RES_WR_IQDROPRSS_F | + FW_RI_RES_WR_IQPCIECH_V(2) | + FW_RI_RES_WR_IQINTCNTTHRESH_V(0) | + FW_RI_RES_WR_IQO_F | + FW_RI_RES_WR_IQESIZE_V(1)); res->u.cq.iqsize = cpu_to_be16(cq->size); res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr); @@ -182,12 +182,12 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, wq, cq, cq->sw_cidx, cq->sw_pidx); memset(&cqe, 0, sizeof(cqe)); - cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | - V_CQE_OPCODE(FW_RI_SEND) | - V_CQE_TYPE(0) | - V_CQE_SWCQE(1) | - V_CQE_QPID(wq->sq.qid)); - cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | + CQE_OPCODE_V(FW_RI_SEND) | + CQE_TYPE_V(0) | + CQE_SWCQE_V(1) | + CQE_QPID_V(wq->sq.qid)); + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); } @@ -215,13 +215,13 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, wq, cq, cq->sw_cidx, cq->sw_pidx); memset(&cqe, 0, sizeof(cqe)); - cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | - V_CQE_OPCODE(swcqe->opcode) | - V_CQE_TYPE(1) | - V_CQE_SWCQE(1) | - V_CQE_QPID(wq->sq.qid)); + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | + CQE_OPCODE_V(swcqe->opcode) | + CQE_TYPE_V(1) | + CQE_SWCQE_V(1) | + CQE_QPID_V(wq->sq.qid)); CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; - cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); cq->sw_queue[cq->sw_pidx] = cqe; t4_swcq_produce(cq); } @@ -284,7 +284,7 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) 
*/ PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n", __func__, cidx, cq->sw_pidx); - swsqe->cqe.header |= htonl(V_CQE_SWCQE(1)); + swsqe->cqe.header |= htonl(CQE_SWCQE_V(1)); cq->sw_queue[cq->sw_pidx] = swsqe->cqe; t4_swcq_produce(cq); swsqe->flushed = 1; @@ -301,10 +301,10 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, { read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx; read_cqe->len = htonl(wq->sq.oldest_read->read_len); - read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) | - V_CQE_SWCQE(SW_CQE(hw_cqe)) | - V_CQE_OPCODE(FW_RI_READ_REQ) | - V_CQE_TYPE(1)); + read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) | + CQE_SWCQE_V(SW_CQE(hw_cqe)) | + CQE_OPCODE_V(FW_RI_READ_REQ) | + CQE_TYPE_V(1)); read_cqe->bits_type_ts = hw_cqe->bits_type_ts; } @@ -400,7 +400,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) } else { swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx]; *swcqe = *hw_cqe; - swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1)); + swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1)); t4_swcq_produce(&chp->cq); } next_cqe: @@ -576,7 +576,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, } if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { t4_set_wq_in_error(wq); - hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN)); + hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN)); goto proc_cqe; } goto proc_cqe; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index eb5df4e62703..8fb295e4a9ab 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -380,12 +380,12 @@ static int dump_stag(int id, void *p, void *data) "stag: idx 0x%x valid %d key 0x%x state %d pdid %d " "perm 0x%x ps %d len 0x%llx va 0x%llx\n", (u32)id<<8, - G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)), - G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)), + FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)), + FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)), ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); if (cc < space) @@ -700,37 +700,24 @@ static const struct file_operations ep_debugfs_fops = { static int setup_debugfs(struct c4iw_dev *devp) { - struct dentry *de; - if (!devp->debugfs_root) return -1; - de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root, - (void *)devp, &qp_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root, + (void *)devp, &qp_debugfs_fops, 4096); - de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root, - (void *)devp, &stag_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root, + (void *)devp, &stag_debugfs_fops, 4096); - de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root, - (void *)devp, &stats_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root, + (void *)devp, &stats_debugfs_fops, 4096); - de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root, - (void *)devp, 
&ep_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root, + (void *)devp, &ep_debugfs_fops, 4096); - if (c4iw_wr_log) { - de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root, - (void *)devp, &wr_log_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; - } + if (c4iw_wr_log) + debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root, + (void *)devp, &wr_log_debugfs_fops, 4096); return 0; } diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index c9df0549f51d..bdfac2ccb704 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -50,12 +50,12 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag) PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d " "perm 0x%x ps %d len 0x%llx va 0x%llx\n", stag & 0xffffff00, - G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)), - G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)), - G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)), + FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)), + FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)), + FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)), ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); } @@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) struct c4iw_cq *chp; unsigned long flag; + spin_lock_irqsave(&dev->lock, flag); chp = get_chp(dev, qid); if (chp) { + atomic_inc(&chp->refcnt); + spin_unlock_irqrestore(&dev->lock, flag); t4_clear_cq_armed(&chp->cq); spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); - } else + if (atomic_dec_and_test(&chp->refcnt)) + wake_up(&chp->wait); + } else { PDBG("%s unknown cqid 0x%x\n", __func__, qid); + spin_unlock_irqrestore(&dev->lock, flag); + } return 0; } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index b5678ac97393..d87e1650f643 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) return (int)(rdev->lldi.vr->stag.size >> 5); } -#define C4IW_WR_TO (30*HZ) +#define C4IW_WR_TO (60*HZ) struct c4iw_wr_wait { struct completion completion; @@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, u32 hwtid, u32 qpid, const char *func) { - unsigned to = C4IW_WR_TO; int ret; - do { - ret = wait_for_completion_timeout(&wr_waitp->completion, to); - if (!ret) { - printk(KERN_ERR MOD "%s - Device %s not responding - " - "tid %u qpid %u\n", func, - pci_name(rdev->lldi.pdev), hwtid, qpid); - if (c4iw_fatal_error(rdev)) { - wr_waitp->ret = -EIO; - break; - } - to = to << 2; - } - } while (!ret); + if (c4iw_fatal_error(rdev)) { + wr_waitp->ret = -EIO; + goto out; + } + + ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO); + if (!ret) { + PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n", + func, pci_name(rdev->lldi.pdev), hwtid, qpid); + rdev->flags |= T4_FATAL_ERROR; + wr_waitp->ret = -EIO; + } +out: if 
(wr_waitp->ret) PDBG("%s: FW reply %d tid %u qpid %u\n", pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index cb43c2299ac0..6791fd16272c 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L; req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE)); - req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1)); + req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1)); req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5)); req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr)); sgl = (struct ulptx_sgl *)(req + 1); sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) | - ULPTX_NSGE(1)); + ULPTX_NSGE_V(1)); sgl->len0 = cpu_to_be32(len); sgl->addr0 = cpu_to_be64(data); @@ -286,17 +286,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, if (reset_tpt_entry) memset(&tpt, 0, sizeof(tpt)); else { - tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID | - V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) | - V_FW_RI_TPTE_STAGSTATE(stag_state) | - V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid)); - tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) | - (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) | - V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO : + tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | + FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) | + FW_RI_TPTE_STAGSTATE_V(stag_state) | + FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid)); + tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) | + (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) | + FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO : FW_RI_VA_BASED_TO))| - V_FW_RI_TPTE_PS(page_size)); + FW_RI_TPTE_PS_V(page_size)); tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32( - V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3)); + FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3)); tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); tpt.va_hi = cpu_to_be32((u32)(to >> 32)); tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index bb85d479e66e..15cae5a31018 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -272,7 +272,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, memset(res_wr, 0, wr_len); res_wr->op_nres = cpu_to_be32( FW_WR_OP_V(FW_RI_RES_WR) | - V_FW_RI_RES_WR_NRES(2) | + FW_RI_RES_WR_NRES_V(2) | FW_WR_COMPL_F); res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); res_wr->cookie = (unsigned long) &wr_wait; @@ -287,19 +287,19 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, rdev->hw_queue.t4_eq_status_entries; res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( - V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ - V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ - V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ - (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) | - V_FW_RI_RES_WR_IQID(scq->cqid)); + FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */ + FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ + FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ + (t4_sq_onchip(&wq->sq) ? 
FW_RI_RES_WR_ONCHIP_F : 0) | + FW_RI_RES_WR_IQID_V(scq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( - V_FW_RI_RES_WR_DCAEN(0) | - V_FW_RI_RES_WR_DCACPU(0) | - V_FW_RI_RES_WR_FBMIN(2) | - V_FW_RI_RES_WR_FBMAX(2) | - V_FW_RI_RES_WR_CIDXFTHRESHO(0) | - V_FW_RI_RES_WR_CIDXFTHRESH(0) | - V_FW_RI_RES_WR_EQSIZE(eqsize)); + FW_RI_RES_WR_DCAEN_V(0) | + FW_RI_RES_WR_DCACPU_V(0) | + FW_RI_RES_WR_FBMIN_V(2) | + FW_RI_RES_WR_FBMAX_V(2) | + FW_RI_RES_WR_CIDXFTHRESHO_V(0) | + FW_RI_RES_WR_CIDXFTHRESH_V(0) | + FW_RI_RES_WR_EQSIZE_V(eqsize)); res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); res++; @@ -312,18 +312,18 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + rdev->hw_queue.t4_eq_status_entries; res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( - V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ - V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ - V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ - V_FW_RI_RES_WR_IQID(rcq->cqid)); + FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */ + FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */ + FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */ + FW_RI_RES_WR_IQID_V(rcq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( - V_FW_RI_RES_WR_DCAEN(0) | - V_FW_RI_RES_WR_DCACPU(0) | - V_FW_RI_RES_WR_FBMIN(2) | - V_FW_RI_RES_WR_FBMAX(2) | - V_FW_RI_RES_WR_CIDXFTHRESHO(0) | - V_FW_RI_RES_WR_CIDXFTHRESH(0) | - V_FW_RI_RES_WR_EQSIZE(eqsize)); + FW_RI_RES_WR_DCAEN_V(0) | + FW_RI_RES_WR_DCACPU_V(0) | + FW_RI_RES_WR_FBMIN_V(2) | + FW_RI_RES_WR_FBMAX_V(2) | + FW_RI_RES_WR_CIDXFTHRESHO_V(0) | + FW_RI_RES_WR_CIDXFTHRESH_V(0) | + FW_RI_RES_WR_EQSIZE_V(eqsize)); res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); @@ -444,19 +444,19 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, case IB_WR_SEND: if (wr->send_flags & IB_SEND_SOLICITED) wqe->send.sendop_pkd = cpu_to_be32( - V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE)); + FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE)); else wqe->send.sendop_pkd = cpu_to_be32( - V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND)); + FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND)); wqe->send.stag_inv = 0; break; case IB_WR_SEND_WITH_INV: if (wr->send_flags & IB_SEND_SOLICITED) wqe->send.sendop_pkd = cpu_to_be32( - V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV)); + FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV)); else wqe->send.sendop_pkd = cpu_to_be32( - V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV)); + FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV)); wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); break; @@ -1283,8 +1283,8 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) wqe->u.init.type = FW_RI_TYPE_INIT; wqe->u.init.mpareqbit_p2ptype = - V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) | - V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type); + FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | + FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; if (qhp->attr.mpa_attr.recv_marker_enabled) wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; @@ -1776,7 +1776,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, if (mm5) { mm5->key = uresp.ma_sync_key; mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0) - + A_PCIE_MA_SYNC) & PAGE_MASK; + + PCIE_MA_SYNC_A) & PAGE_MASK; mm5->len = PAGE_SIZE; insert_mmap(ucontext, mm5); } diff --git a/drivers/infiniband/hw/cxgb4/t4.h 
b/drivers/infiniband/hw/cxgb4/t4.h index c04e5134b30c..871cdcac7be2 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -41,7 +41,7 @@ #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ #define T4_STAG_UNSET 0xffffffff #define T4_FW_MAJ 0 -#define A_PCIE_MA_SYNC 0x30b4 +#define PCIE_MA_SYNC_A 0x30b4 struct t4_status_page { __be32 rsvd1; /* flit 0 - hw owns */ @@ -184,44 +184,44 @@ struct t4_cqe { /* macros for flit 0 of the cqe */ -#define S_CQE_QPID 12 -#define M_CQE_QPID 0xFFFFF -#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID) -#define V_CQE_QPID(x) ((x)<<S_CQE_QPID) - -#define S_CQE_SWCQE 11 -#define M_CQE_SWCQE 0x1 -#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE) -#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE) - -#define S_CQE_STATUS 5 -#define M_CQE_STATUS 0x1F -#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS) -#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS) - -#define S_CQE_TYPE 4 -#define M_CQE_TYPE 0x1 -#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE) -#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE) - -#define S_CQE_OPCODE 0 -#define M_CQE_OPCODE 0xF -#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE) -#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE) - -#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header))) -#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header))) -#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header))) +#define CQE_QPID_S 12 +#define CQE_QPID_M 0xFFFFF +#define CQE_QPID_G(x) ((((x) >> CQE_QPID_S)) & CQE_QPID_M) +#define CQE_QPID_V(x) ((x)<<CQE_QPID_S) + +#define CQE_SWCQE_S 11 +#define CQE_SWCQE_M 0x1 +#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M) +#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S) + +#define CQE_STATUS_S 5 +#define CQE_STATUS_M 0x1F +#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M) +#define CQE_STATUS_V(x) ((x)<<CQE_STATUS_S) + +#define CQE_TYPE_S 4 +#define CQE_TYPE_M 0x1 +#define CQE_TYPE_G(x) ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M) +#define CQE_TYPE_V(x) ((x)<<CQE_TYPE_S) + +#define CQE_OPCODE_S 0 +#define CQE_OPCODE_M 0xF +#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M) +#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S) + +#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header))) +#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header))) +#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header))) #define SQ_TYPE(x) (CQE_TYPE((x))) #define RQ_TYPE(x) (!CQE_TYPE((x))) -#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header))) -#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header))) +#define CQE_STATUS(x) (CQE_STATUS_G(be32_to_cpu((x)->header))) +#define CQE_OPCODE(x) (CQE_OPCODE_G(be32_to_cpu((x)->header))) #define CQE_SEND_OPCODE(x)( \ - (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \ - (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \ - (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \ - (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV)) + (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \ + (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \ + (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \ + (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV)) #define CQE_LEN(x) (be32_to_cpu((x)->len)) @@ -237,25 +237,25 @@ struct t4_cqe { #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) /* macros for flit 3 of the cqe */ -#define S_CQE_GENBIT 63 -#define M_CQE_GENBIT 0x1 -#define 
G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT) -#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT) +#define CQE_GENBIT_S 63 +#define CQE_GENBIT_M 0x1 +#define CQE_GENBIT_G(x) (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M) +#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S) -#define S_CQE_OVFBIT 62 -#define M_CQE_OVFBIT 0x1 -#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT) +#define CQE_OVFBIT_S 62 +#define CQE_OVFBIT_M 0x1 +#define CQE_OVFBIT_G(x) ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M) -#define S_CQE_IQTYPE 60 -#define M_CQE_IQTYPE 0x3 -#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE) +#define CQE_IQTYPE_S 60 +#define CQE_IQTYPE_M 0x3 +#define CQE_IQTYPE_G(x) ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M) -#define M_CQE_TS 0x0fffffffffffffffULL -#define G_CQE_TS(x) ((x) & M_CQE_TS) +#define CQE_TS_M 0x0fffffffffffffffULL +#define CQE_TS_G(x) ((x) & CQE_TS_M) -#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts))) -#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts))) -#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts))) +#define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts))) +#define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts))) +#define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts))) struct t4_swsqe { u64 wr_id; @@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, } else { PDBG("%s: DB wq->sq.pidx = %d\n", __func__, wq->sq.pidx); - writel(PIDX_T5(inc), wq->sq.udb); + writel(PIDX_T5_V(inc), wq->sq.udb); } /* Flush user doorbell area writes. */ wmb(); return; } - writel(QID(wq->sq.qid) | PIDX(inc), wq->db); + writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db); } static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, @@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, } else { PDBG("%s: DB wq->rq.pidx = %d\n", __func__, wq->rq.pidx); - writel(PIDX_T5(inc), wq->rq.udb); + writel(PIDX_T5_V(inc), wq->rq.udb); } /* Flush user doorbell area writes. 
*/ wmb(); return; } - writel(QID(wq->rq.qid) | PIDX(inc), wq->db); + writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db); } static inline int t4_wq_in_error(struct t4_wq *wq) @@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se) u32 val; set_bit(CQ_ARMED, &cq->flags); - while (cq->cidx_inc > CIDXINC_MASK) { - val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | - INGRESSQID(cq->cqid); + while (cq->cidx_inc > CIDXINC_M) { + val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | + INGRESSQID_V(cq->cqid); writel(val, cq->gts); - cq->cidx_inc -= CIDXINC_MASK; + cq->cidx_inc -= CIDXINC_M; } - val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) | - INGRESSQID(cq->cqid); + val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | + INGRESSQID_V(cq->cqid); writel(val, cq->gts); cq->cidx_inc = 0; return 0; @@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq) static inline void t4_hwcq_consume(struct t4_cq *cq) { cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; - if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) { + if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) { u32 val; - val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) | - INGRESSQID(cq->cqid); + val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | + INGRESSQID_V(cq->cqid); writel(val, cq->gts); cq->cidx_inc = 0; } diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 5709e77faf7c..5e53327fc647 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -162,102 +162,102 @@ struct fw_ri_tpte { __be32 len_hi; }; -#define S_FW_RI_TPTE_VALID 31 -#define M_FW_RI_TPTE_VALID 0x1 -#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID) -#define G_FW_RI_TPTE_VALID(x) \ - (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID) -#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U) - -#define S_FW_RI_TPTE_STAGKEY 23 -#define M_FW_RI_TPTE_STAGKEY 0xff -#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY) -#define G_FW_RI_TPTE_STAGKEY(x) \ - (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY) - -#define S_FW_RI_TPTE_STAGSTATE 22 -#define M_FW_RI_TPTE_STAGSTATE 0x1 -#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE) -#define G_FW_RI_TPTE_STAGSTATE(x) \ - (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE) -#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U) - -#define S_FW_RI_TPTE_STAGTYPE 20 -#define M_FW_RI_TPTE_STAGTYPE 0x3 -#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE) -#define G_FW_RI_TPTE_STAGTYPE(x) \ - (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE) - -#define S_FW_RI_TPTE_PDID 0 -#define M_FW_RI_TPTE_PDID 0xfffff -#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID) -#define G_FW_RI_TPTE_PDID(x) \ - (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID) - -#define S_FW_RI_TPTE_PERM 28 -#define M_FW_RI_TPTE_PERM 0xf -#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM) -#define G_FW_RI_TPTE_PERM(x) \ - (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM) - -#define S_FW_RI_TPTE_REMINVDIS 27 -#define M_FW_RI_TPTE_REMINVDIS 0x1 -#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS) -#define G_FW_RI_TPTE_REMINVDIS(x) \ - (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS) -#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U) - -#define S_FW_RI_TPTE_ADDRTYPE 26 -#define M_FW_RI_TPTE_ADDRTYPE 1 -#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << 
S_FW_RI_TPTE_ADDRTYPE) -#define G_FW_RI_TPTE_ADDRTYPE(x) \ - (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE) -#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U) - -#define S_FW_RI_TPTE_MWBINDEN 25 -#define M_FW_RI_TPTE_MWBINDEN 0x1 -#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN) -#define G_FW_RI_TPTE_MWBINDEN(x) \ - (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN) -#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U) - -#define S_FW_RI_TPTE_PS 20 -#define M_FW_RI_TPTE_PS 0x1f -#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS) -#define G_FW_RI_TPTE_PS(x) \ - (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS) - -#define S_FW_RI_TPTE_QPID 0 -#define M_FW_RI_TPTE_QPID 0xfffff -#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID) -#define G_FW_RI_TPTE_QPID(x) \ - (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID) - -#define S_FW_RI_TPTE_NOSNOOP 30 -#define M_FW_RI_TPTE_NOSNOOP 0x1 -#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP) -#define G_FW_RI_TPTE_NOSNOOP(x) \ - (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP) -#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U) - -#define S_FW_RI_TPTE_PBLADDR 0 -#define M_FW_RI_TPTE_PBLADDR 0x1fffffff -#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR) -#define G_FW_RI_TPTE_PBLADDR(x) \ - (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR) - -#define S_FW_RI_TPTE_DCA 24 -#define M_FW_RI_TPTE_DCA 0x1f -#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA) -#define G_FW_RI_TPTE_DCA(x) \ - (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA) - -#define S_FW_RI_TPTE_MWBCNT_PSTAG 0 -#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff -#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \ - ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG) -#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \ - (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG) +#define FW_RI_TPTE_VALID_S 31 +#define FW_RI_TPTE_VALID_M 0x1 +#define FW_RI_TPTE_VALID_V(x) ((x) << FW_RI_TPTE_VALID_S) +#define FW_RI_TPTE_VALID_G(x) \ + (((x) >> FW_RI_TPTE_VALID_S) & FW_RI_TPTE_VALID_M) +#define FW_RI_TPTE_VALID_F FW_RI_TPTE_VALID_V(1U) + +#define FW_RI_TPTE_STAGKEY_S 23 +#define FW_RI_TPTE_STAGKEY_M 0xff +#define FW_RI_TPTE_STAGKEY_V(x) ((x) << FW_RI_TPTE_STAGKEY_S) +#define FW_RI_TPTE_STAGKEY_G(x) \ + (((x) >> FW_RI_TPTE_STAGKEY_S) & FW_RI_TPTE_STAGKEY_M) + +#define FW_RI_TPTE_STAGSTATE_S 22 +#define FW_RI_TPTE_STAGSTATE_M 0x1 +#define FW_RI_TPTE_STAGSTATE_V(x) ((x) << FW_RI_TPTE_STAGSTATE_S) +#define FW_RI_TPTE_STAGSTATE_G(x) \ + (((x) >> FW_RI_TPTE_STAGSTATE_S) & FW_RI_TPTE_STAGSTATE_M) +#define FW_RI_TPTE_STAGSTATE_F FW_RI_TPTE_STAGSTATE_V(1U) + +#define FW_RI_TPTE_STAGTYPE_S 20 +#define FW_RI_TPTE_STAGTYPE_M 0x3 +#define FW_RI_TPTE_STAGTYPE_V(x) ((x) << FW_RI_TPTE_STAGTYPE_S) +#define FW_RI_TPTE_STAGTYPE_G(x) \ + (((x) >> FW_RI_TPTE_STAGTYPE_S) & FW_RI_TPTE_STAGTYPE_M) + +#define FW_RI_TPTE_PDID_S 0 +#define FW_RI_TPTE_PDID_M 0xfffff +#define FW_RI_TPTE_PDID_V(x) ((x) << FW_RI_TPTE_PDID_S) +#define FW_RI_TPTE_PDID_G(x) \ + (((x) >> FW_RI_TPTE_PDID_S) & FW_RI_TPTE_PDID_M) + +#define FW_RI_TPTE_PERM_S 28 +#define FW_RI_TPTE_PERM_M 0xf +#define FW_RI_TPTE_PERM_V(x) ((x) << FW_RI_TPTE_PERM_S) +#define FW_RI_TPTE_PERM_G(x) \ + (((x) >> FW_RI_TPTE_PERM_S) & FW_RI_TPTE_PERM_M) + +#define FW_RI_TPTE_REMINVDIS_S 27 +#define FW_RI_TPTE_REMINVDIS_M 0x1 +#define FW_RI_TPTE_REMINVDIS_V(x) ((x) << FW_RI_TPTE_REMINVDIS_S) +#define FW_RI_TPTE_REMINVDIS_G(x) \ + (((x) >> FW_RI_TPTE_REMINVDIS_S) & FW_RI_TPTE_REMINVDIS_M) +#define FW_RI_TPTE_REMINVDIS_F FW_RI_TPTE_REMINVDIS_V(1U) + 
+#define FW_RI_TPTE_ADDRTYPE_S 26 +#define FW_RI_TPTE_ADDRTYPE_M 1 +#define FW_RI_TPTE_ADDRTYPE_V(x) ((x) << FW_RI_TPTE_ADDRTYPE_S) +#define FW_RI_TPTE_ADDRTYPE_G(x) \ + (((x) >> FW_RI_TPTE_ADDRTYPE_S) & FW_RI_TPTE_ADDRTYPE_M) +#define FW_RI_TPTE_ADDRTYPE_F FW_RI_TPTE_ADDRTYPE_V(1U) + +#define FW_RI_TPTE_MWBINDEN_S 25 +#define FW_RI_TPTE_MWBINDEN_M 0x1 +#define FW_RI_TPTE_MWBINDEN_V(x) ((x) << FW_RI_TPTE_MWBINDEN_S) +#define FW_RI_TPTE_MWBINDEN_G(x) \ + (((x) >> FW_RI_TPTE_MWBINDEN_S) & FW_RI_TPTE_MWBINDEN_M) +#define FW_RI_TPTE_MWBINDEN_F FW_RI_TPTE_MWBINDEN_V(1U) + +#define FW_RI_TPTE_PS_S 20 +#define FW_RI_TPTE_PS_M 0x1f +#define FW_RI_TPTE_PS_V(x) ((x) << FW_RI_TPTE_PS_S) +#define FW_RI_TPTE_PS_G(x) \ + (((x) >> FW_RI_TPTE_PS_S) & FW_RI_TPTE_PS_M) + +#define FW_RI_TPTE_QPID_S 0 +#define FW_RI_TPTE_QPID_M 0xfffff +#define FW_RI_TPTE_QPID_V(x) ((x) << FW_RI_TPTE_QPID_S) +#define FW_RI_TPTE_QPID_G(x) \ + (((x) >> FW_RI_TPTE_QPID_S) & FW_RI_TPTE_QPID_M) + +#define FW_RI_TPTE_NOSNOOP_S 30 +#define FW_RI_TPTE_NOSNOOP_M 0x1 +#define FW_RI_TPTE_NOSNOOP_V(x) ((x) << FW_RI_TPTE_NOSNOOP_S) +#define FW_RI_TPTE_NOSNOOP_G(x) \ + (((x) >> FW_RI_TPTE_NOSNOOP_S) & FW_RI_TPTE_NOSNOOP_M) +#define FW_RI_TPTE_NOSNOOP_F FW_RI_TPTE_NOSNOOP_V(1U) + +#define FW_RI_TPTE_PBLADDR_S 0 +#define FW_RI_TPTE_PBLADDR_M 0x1fffffff +#define FW_RI_TPTE_PBLADDR_V(x) ((x) << FW_RI_TPTE_PBLADDR_S) +#define FW_RI_TPTE_PBLADDR_G(x) \ + (((x) >> FW_RI_TPTE_PBLADDR_S) & FW_RI_TPTE_PBLADDR_M) + +#define FW_RI_TPTE_DCA_S 24 +#define FW_RI_TPTE_DCA_M 0x1f +#define FW_RI_TPTE_DCA_V(x) ((x) << FW_RI_TPTE_DCA_S) +#define FW_RI_TPTE_DCA_G(x) \ + (((x) >> FW_RI_TPTE_DCA_S) & FW_RI_TPTE_DCA_M) + +#define FW_RI_TPTE_MWBCNT_PSTAG_S 0 +#define FW_RI_TPTE_MWBCNT_PSTAG_M 0xffffff +#define FW_RI_TPTE_MWBCNT_PSTAT_V(x) \ + ((x) << FW_RI_TPTE_MWBCNT_PSTAG_S) +#define FW_RI_TPTE_MWBCNT_PSTAG_G(x) \ + (((x) >> FW_RI_TPTE_MWBCNT_PSTAG_S) & FW_RI_TPTE_MWBCNT_PSTAG_M) enum fw_ri_res_type { FW_RI_RES_TYPE_SQ, @@ -308,222 +308,222 @@ struct fw_ri_res_wr { #endif }; -#define S_FW_RI_RES_WR_NRES 0 -#define M_FW_RI_RES_WR_NRES 0xff -#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES) -#define G_FW_RI_RES_WR_NRES(x) \ - (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES) - -#define S_FW_RI_RES_WR_FETCHSZM 26 -#define M_FW_RI_RES_WR_FETCHSZM 0x1 -#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM) -#define G_FW_RI_RES_WR_FETCHSZM(x) \ - (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM) -#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U) - -#define S_FW_RI_RES_WR_STATUSPGNS 25 -#define M_FW_RI_RES_WR_STATUSPGNS 0x1 -#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS) -#define G_FW_RI_RES_WR_STATUSPGNS(x) \ - (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS) -#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U) - -#define S_FW_RI_RES_WR_STATUSPGRO 24 -#define M_FW_RI_RES_WR_STATUSPGRO 0x1 -#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO) -#define G_FW_RI_RES_WR_STATUSPGRO(x) \ - (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO) -#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U) - -#define S_FW_RI_RES_WR_FETCHNS 23 -#define M_FW_RI_RES_WR_FETCHNS 0x1 -#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS) -#define G_FW_RI_RES_WR_FETCHNS(x) \ - (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS) -#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U) - -#define S_FW_RI_RES_WR_FETCHRO 22 -#define 
M_FW_RI_RES_WR_FETCHRO 0x1 -#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO) -#define G_FW_RI_RES_WR_FETCHRO(x) \ - (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO) -#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U) - -#define S_FW_RI_RES_WR_HOSTFCMODE 20 -#define M_FW_RI_RES_WR_HOSTFCMODE 0x3 -#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE) -#define G_FW_RI_RES_WR_HOSTFCMODE(x) \ - (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE) - -#define S_FW_RI_RES_WR_CPRIO 19 -#define M_FW_RI_RES_WR_CPRIO 0x1 -#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO) -#define G_FW_RI_RES_WR_CPRIO(x) \ - (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO) -#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U) - -#define S_FW_RI_RES_WR_ONCHIP 18 -#define M_FW_RI_RES_WR_ONCHIP 0x1 -#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP) -#define G_FW_RI_RES_WR_ONCHIP(x) \ - (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP) -#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U) - -#define S_FW_RI_RES_WR_PCIECHN 16 -#define M_FW_RI_RES_WR_PCIECHN 0x3 -#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN) -#define G_FW_RI_RES_WR_PCIECHN(x) \ - (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN) - -#define S_FW_RI_RES_WR_IQID 0 -#define M_FW_RI_RES_WR_IQID 0xffff -#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID) -#define G_FW_RI_RES_WR_IQID(x) \ - (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID) - -#define S_FW_RI_RES_WR_DCAEN 31 -#define M_FW_RI_RES_WR_DCAEN 0x1 -#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN) -#define G_FW_RI_RES_WR_DCAEN(x) \ - (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN) -#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U) - -#define S_FW_RI_RES_WR_DCACPU 26 -#define M_FW_RI_RES_WR_DCACPU 0x1f -#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU) -#define G_FW_RI_RES_WR_DCACPU(x) \ - (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU) - -#define S_FW_RI_RES_WR_FBMIN 23 -#define M_FW_RI_RES_WR_FBMIN 0x7 -#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN) -#define G_FW_RI_RES_WR_FBMIN(x) \ - (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN) - -#define S_FW_RI_RES_WR_FBMAX 20 -#define M_FW_RI_RES_WR_FBMAX 0x7 -#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX) -#define G_FW_RI_RES_WR_FBMAX(x) \ - (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX) - -#define S_FW_RI_RES_WR_CIDXFTHRESHO 19 -#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1 -#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO) -#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \ - (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO) -#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U) - -#define S_FW_RI_RES_WR_CIDXFTHRESH 16 -#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7 -#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH) -#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \ - (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH) - -#define S_FW_RI_RES_WR_EQSIZE 0 -#define M_FW_RI_RES_WR_EQSIZE 0xffff -#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE) -#define G_FW_RI_RES_WR_EQSIZE(x) \ - (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE) - -#define S_FW_RI_RES_WR_IQANDST 15 -#define M_FW_RI_RES_WR_IQANDST 0x1 -#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST) -#define G_FW_RI_RES_WR_IQANDST(x) \ - (((x) >> S_FW_RI_RES_WR_IQANDST) & 
M_FW_RI_RES_WR_IQANDST) -#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U) - -#define S_FW_RI_RES_WR_IQANUS 14 -#define M_FW_RI_RES_WR_IQANUS 0x1 -#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS) -#define G_FW_RI_RES_WR_IQANUS(x) \ - (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS) -#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U) - -#define S_FW_RI_RES_WR_IQANUD 12 -#define M_FW_RI_RES_WR_IQANUD 0x3 -#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD) -#define G_FW_RI_RES_WR_IQANUD(x) \ - (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD) - -#define S_FW_RI_RES_WR_IQANDSTINDEX 0 -#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff -#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX) -#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \ - (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX) - -#define S_FW_RI_RES_WR_IQDROPRSS 15 -#define M_FW_RI_RES_WR_IQDROPRSS 0x1 -#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS) -#define G_FW_RI_RES_WR_IQDROPRSS(x) \ - (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS) -#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U) - -#define S_FW_RI_RES_WR_IQGTSMODE 14 -#define M_FW_RI_RES_WR_IQGTSMODE 0x1 -#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE) -#define G_FW_RI_RES_WR_IQGTSMODE(x) \ - (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE) -#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U) - -#define S_FW_RI_RES_WR_IQPCIECH 12 -#define M_FW_RI_RES_WR_IQPCIECH 0x3 -#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH) -#define G_FW_RI_RES_WR_IQPCIECH(x) \ - (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH) - -#define S_FW_RI_RES_WR_IQDCAEN 11 -#define M_FW_RI_RES_WR_IQDCAEN 0x1 -#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN) -#define G_FW_RI_RES_WR_IQDCAEN(x) \ - (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN) -#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U) - -#define S_FW_RI_RES_WR_IQDCACPU 6 -#define M_FW_RI_RES_WR_IQDCACPU 0x1f -#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU) -#define G_FW_RI_RES_WR_IQDCACPU(x) \ - (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU) - -#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4 -#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3 -#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ - ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH) -#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ - (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH) - -#define S_FW_RI_RES_WR_IQO 3 -#define M_FW_RI_RES_WR_IQO 0x1 -#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO) -#define G_FW_RI_RES_WR_IQO(x) \ - (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO) -#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U) - -#define S_FW_RI_RES_WR_IQCPRIO 2 -#define M_FW_RI_RES_WR_IQCPRIO 0x1 -#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO) -#define G_FW_RI_RES_WR_IQCPRIO(x) \ - (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO) -#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U) - -#define S_FW_RI_RES_WR_IQESIZE 0 -#define M_FW_RI_RES_WR_IQESIZE 0x3 -#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE) -#define G_FW_RI_RES_WR_IQESIZE(x) \ - (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE) - -#define S_FW_RI_RES_WR_IQNS 31 -#define M_FW_RI_RES_WR_IQNS 0x1 -#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS) -#define G_FW_RI_RES_WR_IQNS(x) \ - (((x) >> 
S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS) -#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U) - -#define S_FW_RI_RES_WR_IQRO 30 -#define M_FW_RI_RES_WR_IQRO 0x1 -#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO) -#define G_FW_RI_RES_WR_IQRO(x) \ - (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO) -#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U) +#define FW_RI_RES_WR_NRES_S 0 +#define FW_RI_RES_WR_NRES_M 0xff +#define FW_RI_RES_WR_NRES_V(x) ((x) << FW_RI_RES_WR_NRES_S) +#define FW_RI_RES_WR_NRES_G(x) \ + (((x) >> FW_RI_RES_WR_NRES_S) & FW_RI_RES_WR_NRES_M) + +#define FW_RI_RES_WR_FETCHSZM_S 26 +#define FW_RI_RES_WR_FETCHSZM_M 0x1 +#define FW_RI_RES_WR_FETCHSZM_V(x) ((x) << FW_RI_RES_WR_FETCHSZM_S) +#define FW_RI_RES_WR_FETCHSZM_G(x) \ + (((x) >> FW_RI_RES_WR_FETCHSZM_S) & FW_RI_RES_WR_FETCHSZM_M) +#define FW_RI_RES_WR_FETCHSZM_F FW_RI_RES_WR_FETCHSZM_V(1U) + +#define FW_RI_RES_WR_STATUSPGNS_S 25 +#define FW_RI_RES_WR_STATUSPGNS_M 0x1 +#define FW_RI_RES_WR_STATUSPGNS_V(x) ((x) << FW_RI_RES_WR_STATUSPGNS_S) +#define FW_RI_RES_WR_STATUSPGNS_G(x) \ + (((x) >> FW_RI_RES_WR_STATUSPGNS_S) & FW_RI_RES_WR_STATUSPGNS_M) +#define FW_RI_RES_WR_STATUSPGNS_F FW_RI_RES_WR_STATUSPGNS_V(1U) + +#define FW_RI_RES_WR_STATUSPGRO_S 24 +#define FW_RI_RES_WR_STATUSPGRO_M 0x1 +#define FW_RI_RES_WR_STATUSPGRO_V(x) ((x) << FW_RI_RES_WR_STATUSPGRO_S) +#define FW_RI_RES_WR_STATUSPGRO_G(x) \ + (((x) >> FW_RI_RES_WR_STATUSPGRO_S) & FW_RI_RES_WR_STATUSPGRO_M) +#define FW_RI_RES_WR_STATUSPGRO_F FW_RI_RES_WR_STATUSPGRO_V(1U) + +#define FW_RI_RES_WR_FETCHNS_S 23 +#define FW_RI_RES_WR_FETCHNS_M 0x1 +#define FW_RI_RES_WR_FETCHNS_V(x) ((x) << FW_RI_RES_WR_FETCHNS_S) +#define FW_RI_RES_WR_FETCHNS_G(x) \ + (((x) >> FW_RI_RES_WR_FETCHNS_S) & FW_RI_RES_WR_FETCHNS_M) +#define FW_RI_RES_WR_FETCHNS_F FW_RI_RES_WR_FETCHNS_V(1U) + +#define FW_RI_RES_WR_FETCHRO_S 22 +#define FW_RI_RES_WR_FETCHRO_M 0x1 +#define FW_RI_RES_WR_FETCHRO_V(x) ((x) << FW_RI_RES_WR_FETCHRO_S) +#define FW_RI_RES_WR_FETCHRO_G(x) \ + (((x) >> FW_RI_RES_WR_FETCHRO_S) & FW_RI_RES_WR_FETCHRO_M) +#define FW_RI_RES_WR_FETCHRO_F FW_RI_RES_WR_FETCHRO_V(1U) + +#define FW_RI_RES_WR_HOSTFCMODE_S 20 +#define FW_RI_RES_WR_HOSTFCMODE_M 0x3 +#define FW_RI_RES_WR_HOSTFCMODE_V(x) ((x) << FW_RI_RES_WR_HOSTFCMODE_S) +#define FW_RI_RES_WR_HOSTFCMODE_G(x) \ + (((x) >> FW_RI_RES_WR_HOSTFCMODE_S) & FW_RI_RES_WR_HOSTFCMODE_M) + +#define FW_RI_RES_WR_CPRIO_S 19 +#define FW_RI_RES_WR_CPRIO_M 0x1 +#define FW_RI_RES_WR_CPRIO_V(x) ((x) << FW_RI_RES_WR_CPRIO_S) +#define FW_RI_RES_WR_CPRIO_G(x) \ + (((x) >> FW_RI_RES_WR_CPRIO_S) & FW_RI_RES_WR_CPRIO_M) +#define FW_RI_RES_WR_CPRIO_F FW_RI_RES_WR_CPRIO_V(1U) + +#define FW_RI_RES_WR_ONCHIP_S 18 +#define FW_RI_RES_WR_ONCHIP_M 0x1 +#define FW_RI_RES_WR_ONCHIP_V(x) ((x) << FW_RI_RES_WR_ONCHIP_S) +#define FW_RI_RES_WR_ONCHIP_G(x) \ + (((x) >> FW_RI_RES_WR_ONCHIP_S) & FW_RI_RES_WR_ONCHIP_M) +#define FW_RI_RES_WR_ONCHIP_F FW_RI_RES_WR_ONCHIP_V(1U) + +#define FW_RI_RES_WR_PCIECHN_S 16 +#define FW_RI_RES_WR_PCIECHN_M 0x3 +#define FW_RI_RES_WR_PCIECHN_V(x) ((x) << FW_RI_RES_WR_PCIECHN_S) +#define FW_RI_RES_WR_PCIECHN_G(x) \ + (((x) >> FW_RI_RES_WR_PCIECHN_S) & FW_RI_RES_WR_PCIECHN_M) + +#define FW_RI_RES_WR_IQID_S 0 +#define FW_RI_RES_WR_IQID_M 0xffff +#define FW_RI_RES_WR_IQID_V(x) ((x) << FW_RI_RES_WR_IQID_S) +#define FW_RI_RES_WR_IQID_G(x) \ + (((x) >> FW_RI_RES_WR_IQID_S) & FW_RI_RES_WR_IQID_M) + +#define FW_RI_RES_WR_DCAEN_S 31 +#define FW_RI_RES_WR_DCAEN_M 0x1 +#define FW_RI_RES_WR_DCAEN_V(x) ((x) << FW_RI_RES_WR_DCAEN_S) +#define 
FW_RI_RES_WR_DCAEN_G(x) \ + (((x) >> FW_RI_RES_WR_DCAEN_S) & FW_RI_RES_WR_DCAEN_M) +#define FW_RI_RES_WR_DCAEN_F FW_RI_RES_WR_DCAEN_V(1U) + +#define FW_RI_RES_WR_DCACPU_S 26 +#define FW_RI_RES_WR_DCACPU_M 0x1f +#define FW_RI_RES_WR_DCACPU_V(x) ((x) << FW_RI_RES_WR_DCACPU_S) +#define FW_RI_RES_WR_DCACPU_G(x) \ + (((x) >> FW_RI_RES_WR_DCACPU_S) & FW_RI_RES_WR_DCACPU_M) + +#define FW_RI_RES_WR_FBMIN_S 23 +#define FW_RI_RES_WR_FBMIN_M 0x7 +#define FW_RI_RES_WR_FBMIN_V(x) ((x) << FW_RI_RES_WR_FBMIN_S) +#define FW_RI_RES_WR_FBMIN_G(x) \ + (((x) >> FW_RI_RES_WR_FBMIN_S) & FW_RI_RES_WR_FBMIN_M) + +#define FW_RI_RES_WR_FBMAX_S 20 +#define FW_RI_RES_WR_FBMAX_M 0x7 +#define FW_RI_RES_WR_FBMAX_V(x) ((x) << FW_RI_RES_WR_FBMAX_S) +#define FW_RI_RES_WR_FBMAX_G(x) \ + (((x) >> FW_RI_RES_WR_FBMAX_S) & FW_RI_RES_WR_FBMAX_M) + +#define FW_RI_RES_WR_CIDXFTHRESHO_S 19 +#define FW_RI_RES_WR_CIDXFTHRESHO_M 0x1 +#define FW_RI_RES_WR_CIDXFTHRESHO_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESHO_S) +#define FW_RI_RES_WR_CIDXFTHRESHO_G(x) \ + (((x) >> FW_RI_RES_WR_CIDXFTHRESHO_S) & FW_RI_RES_WR_CIDXFTHRESHO_M) +#define FW_RI_RES_WR_CIDXFTHRESHO_F FW_RI_RES_WR_CIDXFTHRESHO_V(1U) + +#define FW_RI_RES_WR_CIDXFTHRESH_S 16 +#define FW_RI_RES_WR_CIDXFTHRESH_M 0x7 +#define FW_RI_RES_WR_CIDXFTHRESH_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESH_S) +#define FW_RI_RES_WR_CIDXFTHRESH_G(x) \ + (((x) >> FW_RI_RES_WR_CIDXFTHRESH_S) & FW_RI_RES_WR_CIDXFTHRESH_M) + +#define FW_RI_RES_WR_EQSIZE_S 0 +#define FW_RI_RES_WR_EQSIZE_M 0xffff +#define FW_RI_RES_WR_EQSIZE_V(x) ((x) << FW_RI_RES_WR_EQSIZE_S) +#define FW_RI_RES_WR_EQSIZE_G(x) \ + (((x) >> FW_RI_RES_WR_EQSIZE_S) & FW_RI_RES_WR_EQSIZE_M) + +#define FW_RI_RES_WR_IQANDST_S 15 +#define FW_RI_RES_WR_IQANDST_M 0x1 +#define FW_RI_RES_WR_IQANDST_V(x) ((x) << FW_RI_RES_WR_IQANDST_S) +#define FW_RI_RES_WR_IQANDST_G(x) \ + (((x) >> FW_RI_RES_WR_IQANDST_S) & FW_RI_RES_WR_IQANDST_M) +#define FW_RI_RES_WR_IQANDST_F FW_RI_RES_WR_IQANDST_V(1U) + +#define FW_RI_RES_WR_IQANUS_S 14 +#define FW_RI_RES_WR_IQANUS_M 0x1 +#define FW_RI_RES_WR_IQANUS_V(x) ((x) << FW_RI_RES_WR_IQANUS_S) +#define FW_RI_RES_WR_IQANUS_G(x) \ + (((x) >> FW_RI_RES_WR_IQANUS_S) & FW_RI_RES_WR_IQANUS_M) +#define FW_RI_RES_WR_IQANUS_F FW_RI_RES_WR_IQANUS_V(1U) + +#define FW_RI_RES_WR_IQANUD_S 12 +#define FW_RI_RES_WR_IQANUD_M 0x3 +#define FW_RI_RES_WR_IQANUD_V(x) ((x) << FW_RI_RES_WR_IQANUD_S) +#define FW_RI_RES_WR_IQANUD_G(x) \ + (((x) >> FW_RI_RES_WR_IQANUD_S) & FW_RI_RES_WR_IQANUD_M) + +#define FW_RI_RES_WR_IQANDSTINDEX_S 0 +#define FW_RI_RES_WR_IQANDSTINDEX_M 0xfff +#define FW_RI_RES_WR_IQANDSTINDEX_V(x) ((x) << FW_RI_RES_WR_IQANDSTINDEX_S) +#define FW_RI_RES_WR_IQANDSTINDEX_G(x) \ + (((x) >> FW_RI_RES_WR_IQANDSTINDEX_S) & FW_RI_RES_WR_IQANDSTINDEX_M) + +#define FW_RI_RES_WR_IQDROPRSS_S 15 +#define FW_RI_RES_WR_IQDROPRSS_M 0x1 +#define FW_RI_RES_WR_IQDROPRSS_V(x) ((x) << FW_RI_RES_WR_IQDROPRSS_S) +#define FW_RI_RES_WR_IQDROPRSS_G(x) \ + (((x) >> FW_RI_RES_WR_IQDROPRSS_S) & FW_RI_RES_WR_IQDROPRSS_M) +#define FW_RI_RES_WR_IQDROPRSS_F FW_RI_RES_WR_IQDROPRSS_V(1U) + +#define FW_RI_RES_WR_IQGTSMODE_S 14 +#define FW_RI_RES_WR_IQGTSMODE_M 0x1 +#define FW_RI_RES_WR_IQGTSMODE_V(x) ((x) << FW_RI_RES_WR_IQGTSMODE_S) +#define FW_RI_RES_WR_IQGTSMODE_G(x) \ + (((x) >> FW_RI_RES_WR_IQGTSMODE_S) & FW_RI_RES_WR_IQGTSMODE_M) +#define FW_RI_RES_WR_IQGTSMODE_F FW_RI_RES_WR_IQGTSMODE_V(1U) + +#define FW_RI_RES_WR_IQPCIECH_S 12 +#define FW_RI_RES_WR_IQPCIECH_M 0x3 +#define FW_RI_RES_WR_IQPCIECH_V(x) ((x) << FW_RI_RES_WR_IQPCIECH_S) +#define 
FW_RI_RES_WR_IQPCIECH_G(x) \ + (((x) >> FW_RI_RES_WR_IQPCIECH_S) & FW_RI_RES_WR_IQPCIECH_M) + +#define FW_RI_RES_WR_IQDCAEN_S 11 +#define FW_RI_RES_WR_IQDCAEN_M 0x1 +#define FW_RI_RES_WR_IQDCAEN_V(x) ((x) << FW_RI_RES_WR_IQDCAEN_S) +#define FW_RI_RES_WR_IQDCAEN_G(x) \ + (((x) >> FW_RI_RES_WR_IQDCAEN_S) & FW_RI_RES_WR_IQDCAEN_M) +#define FW_RI_RES_WR_IQDCAEN_F FW_RI_RES_WR_IQDCAEN_V(1U) + +#define FW_RI_RES_WR_IQDCACPU_S 6 +#define FW_RI_RES_WR_IQDCACPU_M 0x1f +#define FW_RI_RES_WR_IQDCACPU_V(x) ((x) << FW_RI_RES_WR_IQDCACPU_S) +#define FW_RI_RES_WR_IQDCACPU_G(x) \ + (((x) >> FW_RI_RES_WR_IQDCACPU_S) & FW_RI_RES_WR_IQDCACPU_M) + +#define FW_RI_RES_WR_IQINTCNTTHRESH_S 4 +#define FW_RI_RES_WR_IQINTCNTTHRESH_M 0x3 +#define FW_RI_RES_WR_IQINTCNTTHRESH_V(x) \ + ((x) << FW_RI_RES_WR_IQINTCNTTHRESH_S) +#define FW_RI_RES_WR_IQINTCNTTHRESH_G(x) \ + (((x) >> FW_RI_RES_WR_IQINTCNTTHRESH_S) & FW_RI_RES_WR_IQINTCNTTHRESH_M) + +#define FW_RI_RES_WR_IQO_S 3 +#define FW_RI_RES_WR_IQO_M 0x1 +#define FW_RI_RES_WR_IQO_V(x) ((x) << FW_RI_RES_WR_IQO_S) +#define FW_RI_RES_WR_IQO_G(x) \ + (((x) >> FW_RI_RES_WR_IQO_S) & FW_RI_RES_WR_IQO_M) +#define FW_RI_RES_WR_IQO_F FW_RI_RES_WR_IQO_V(1U) + +#define FW_RI_RES_WR_IQCPRIO_S 2 +#define FW_RI_RES_WR_IQCPRIO_M 0x1 +#define FW_RI_RES_WR_IQCPRIO_V(x) ((x) << FW_RI_RES_WR_IQCPRIO_S) +#define FW_RI_RES_WR_IQCPRIO_G(x) \ + (((x) >> FW_RI_RES_WR_IQCPRIO_S) & FW_RI_RES_WR_IQCPRIO_M) +#define FW_RI_RES_WR_IQCPRIO_F FW_RI_RES_WR_IQCPRIO_V(1U) + +#define FW_RI_RES_WR_IQESIZE_S 0 +#define FW_RI_RES_WR_IQESIZE_M 0x3 +#define FW_RI_RES_WR_IQESIZE_V(x) ((x) << FW_RI_RES_WR_IQESIZE_S) +#define FW_RI_RES_WR_IQESIZE_G(x) \ + (((x) >> FW_RI_RES_WR_IQESIZE_S) & FW_RI_RES_WR_IQESIZE_M) + +#define FW_RI_RES_WR_IQNS_S 31 +#define FW_RI_RES_WR_IQNS_M 0x1 +#define FW_RI_RES_WR_IQNS_V(x) ((x) << FW_RI_RES_WR_IQNS_S) +#define FW_RI_RES_WR_IQNS_G(x) \ + (((x) >> FW_RI_RES_WR_IQNS_S) & FW_RI_RES_WR_IQNS_M) +#define FW_RI_RES_WR_IQNS_F FW_RI_RES_WR_IQNS_V(1U) + +#define FW_RI_RES_WR_IQRO_S 30 +#define FW_RI_RES_WR_IQRO_M 0x1 +#define FW_RI_RES_WR_IQRO_V(x) ((x) << FW_RI_RES_WR_IQRO_S) +#define FW_RI_RES_WR_IQRO_G(x) \ + (((x) >> FW_RI_RES_WR_IQRO_S) & FW_RI_RES_WR_IQRO_M) +#define FW_RI_RES_WR_IQRO_F FW_RI_RES_WR_IQRO_V(1U) struct fw_ri_rdma_write_wr { __u8 opcode; @@ -562,11 +562,11 @@ struct fw_ri_send_wr { #endif }; -#define S_FW_RI_SEND_WR_SENDOP 0 -#define M_FW_RI_SEND_WR_SENDOP 0xf -#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP) -#define G_FW_RI_SEND_WR_SENDOP(x) \ - (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP) +#define FW_RI_SEND_WR_SENDOP_S 0 +#define FW_RI_SEND_WR_SENDOP_M 0xf +#define FW_RI_SEND_WR_SENDOP_V(x) ((x) << FW_RI_SEND_WR_SENDOP_S) +#define FW_RI_SEND_WR_SENDOP_G(x) \ + (((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M) struct fw_ri_rdma_read_wr { __u8 opcode; @@ -612,25 +612,25 @@ struct fw_ri_bind_mw_wr { __be64 r4; }; -#define S_FW_RI_BIND_MW_WR_QPBINDE 6 -#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1 -#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE) -#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \ - (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE) -#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U) +#define FW_RI_BIND_MW_WR_QPBINDE_S 6 +#define FW_RI_BIND_MW_WR_QPBINDE_M 0x1 +#define FW_RI_BIND_MW_WR_QPBINDE_V(x) ((x) << FW_RI_BIND_MW_WR_QPBINDE_S) +#define FW_RI_BIND_MW_WR_QPBINDE_G(x) \ + (((x) >> FW_RI_BIND_MW_WR_QPBINDE_S) & FW_RI_BIND_MW_WR_QPBINDE_M) +#define 
FW_RI_BIND_MW_WR_QPBINDE_F FW_RI_BIND_MW_WR_QPBINDE_V(1U) -#define S_FW_RI_BIND_MW_WR_NS 5 -#define M_FW_RI_BIND_MW_WR_NS 0x1 -#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS) -#define G_FW_RI_BIND_MW_WR_NS(x) \ - (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS) -#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U) +#define FW_RI_BIND_MW_WR_NS_S 5 +#define FW_RI_BIND_MW_WR_NS_M 0x1 +#define FW_RI_BIND_MW_WR_NS_V(x) ((x) << FW_RI_BIND_MW_WR_NS_S) +#define FW_RI_BIND_MW_WR_NS_G(x) \ + (((x) >> FW_RI_BIND_MW_WR_NS_S) & FW_RI_BIND_MW_WR_NS_M) +#define FW_RI_BIND_MW_WR_NS_F FW_RI_BIND_MW_WR_NS_V(1U) -#define S_FW_RI_BIND_MW_WR_DCACPU 0 -#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f -#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU) -#define G_FW_RI_BIND_MW_WR_DCACPU(x) \ - (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU) +#define FW_RI_BIND_MW_WR_DCACPU_S 0 +#define FW_RI_BIND_MW_WR_DCACPU_M 0x1f +#define FW_RI_BIND_MW_WR_DCACPU_V(x) ((x) << FW_RI_BIND_MW_WR_DCACPU_S) +#define FW_RI_BIND_MW_WR_DCACPU_G(x) \ + (((x) >> FW_RI_BIND_MW_WR_DCACPU_S) & FW_RI_BIND_MW_WR_DCACPU_M) struct fw_ri_fr_nsmr_wr { __u8 opcode; @@ -649,25 +649,25 @@ struct fw_ri_fr_nsmr_wr { __be32 va_lo_fbo; }; -#define S_FW_RI_FR_NSMR_WR_QPBINDE 6 -#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1 -#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE) -#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \ - (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE) -#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U) +#define FW_RI_FR_NSMR_WR_QPBINDE_S 6 +#define FW_RI_FR_NSMR_WR_QPBINDE_M 0x1 +#define FW_RI_FR_NSMR_WR_QPBINDE_V(x) ((x) << FW_RI_FR_NSMR_WR_QPBINDE_S) +#define FW_RI_FR_NSMR_WR_QPBINDE_G(x) \ + (((x) >> FW_RI_FR_NSMR_WR_QPBINDE_S) & FW_RI_FR_NSMR_WR_QPBINDE_M) +#define FW_RI_FR_NSMR_WR_QPBINDE_F FW_RI_FR_NSMR_WR_QPBINDE_V(1U) -#define S_FW_RI_FR_NSMR_WR_NS 5 -#define M_FW_RI_FR_NSMR_WR_NS 0x1 -#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS) -#define G_FW_RI_FR_NSMR_WR_NS(x) \ - (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS) -#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U) +#define FW_RI_FR_NSMR_WR_NS_S 5 +#define FW_RI_FR_NSMR_WR_NS_M 0x1 +#define FW_RI_FR_NSMR_WR_NS_V(x) ((x) << FW_RI_FR_NSMR_WR_NS_S) +#define FW_RI_FR_NSMR_WR_NS_G(x) \ + (((x) >> FW_RI_FR_NSMR_WR_NS_S) & FW_RI_FR_NSMR_WR_NS_M) +#define FW_RI_FR_NSMR_WR_NS_F FW_RI_FR_NSMR_WR_NS_V(1U) -#define S_FW_RI_FR_NSMR_WR_DCACPU 0 -#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f -#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU) -#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \ - (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU) +#define FW_RI_FR_NSMR_WR_DCACPU_S 0 +#define FW_RI_FR_NSMR_WR_DCACPU_M 0x1f +#define FW_RI_FR_NSMR_WR_DCACPU_V(x) ((x) << FW_RI_FR_NSMR_WR_DCACPU_S) +#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \ + (((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M) struct fw_ri_inv_lstag_wr { __u8 opcode; @@ -740,18 +740,18 @@ struct fw_ri_wr { } u; }; -#define S_FW_RI_WR_MPAREQBIT 7 -#define M_FW_RI_WR_MPAREQBIT 0x1 -#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT) -#define G_FW_RI_WR_MPAREQBIT(x) \ - (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT) -#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U) +#define FW_RI_WR_MPAREQBIT_S 7 +#define FW_RI_WR_MPAREQBIT_M 0x1 +#define FW_RI_WR_MPAREQBIT_V(x) ((x) << FW_RI_WR_MPAREQBIT_S) +#define FW_RI_WR_MPAREQBIT_G(x) \ + (((x) >> 
FW_RI_WR_MPAREQBIT_S) & FW_RI_WR_MPAREQBIT_M) +#define FW_RI_WR_MPAREQBIT_F FW_RI_WR_MPAREQBIT_V(1U) -#define S_FW_RI_WR_P2PTYPE 0 -#define M_FW_RI_WR_P2PTYPE 0xf -#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE) -#define G_FW_RI_WR_P2PTYPE(x) \ - (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE) +#define FW_RI_WR_P2PTYPE_S 0 +#define FW_RI_WR_P2PTYPE_M 0xf +#define FW_RI_WR_P2PTYPE_V(x) ((x) << FW_RI_WR_P2PTYPE_S) +#define FW_RI_WR_P2PTYPE_G(x) \ + (((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M) struct tcp_options { __be16 mss; @@ -783,58 +783,58 @@ struct cpl_pass_accept_req { }; /* cpl_pass_accept_req.hdr_len fields */ -#define S_SYN_RX_CHAN 0 -#define M_SYN_RX_CHAN 0xF -#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN) -#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN) - -#define S_TCP_HDR_LEN 10 -#define M_TCP_HDR_LEN 0x3F -#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN) -#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN) - -#define S_IP_HDR_LEN 16 -#define M_IP_HDR_LEN 0x3FF -#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN) -#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN) - -#define S_ETH_HDR_LEN 26 -#define M_ETH_HDR_LEN 0x1F -#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN) -#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN) +#define SYN_RX_CHAN_S 0 +#define SYN_RX_CHAN_M 0xF +#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S) +#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M) + +#define TCP_HDR_LEN_S 10 +#define TCP_HDR_LEN_M 0x3F +#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S) +#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M) + +#define IP_HDR_LEN_S 16 +#define IP_HDR_LEN_M 0x3FF +#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S) +#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M) + +#define ETH_HDR_LEN_S 26 +#define ETH_HDR_LEN_M 0x1F +#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S) +#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M) /* cpl_pass_accept_req.l2info fields */ -#define S_SYN_MAC_IDX 0 -#define M_SYN_MAC_IDX 0x1FF -#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX) -#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX) +#define SYN_MAC_IDX_S 0 +#define SYN_MAC_IDX_M 0x1FF +#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S) +#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M) -#define S_SYN_XACT_MATCH 9 -#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH) -#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U) +#define SYN_XACT_MATCH_S 9 +#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S) +#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U) -#define S_SYN_INTF 12 -#define M_SYN_INTF 0xF -#define V_SYN_INTF(x) ((x) << S_SYN_INTF) -#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF) +#define SYN_INTF_S 12 +#define SYN_INTF_M 0xF +#define SYN_INTF_V(x) ((x) << SYN_INTF_S) +#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M) struct ulptx_idata { __be32 cmd_more; __be32 len; }; -#define S_ULPTX_NSGE 0 -#define M_ULPTX_NSGE 0xFFFF -#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE) +#define ULPTX_NSGE_S 0 +#define ULPTX_NSGE_M 0xFFFF +#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) -#define S_RX_DACK_MODE 29 -#define M_RX_DACK_MODE 0x3 -#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE) -#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE) +#define RX_DACK_MODE_S 29 +#define RX_DACK_MODE_M 0x3 +#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S) +#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M) -#define 
S_RX_DACK_CHANGE 31 -#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) -#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) +#define RX_DACK_CHANGE_S 31 +#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S) +#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U) enum { /* TCP congestion control algorithms */ CONG_ALG_RENO, @@ -843,10 +843,10 @@ enum { /* TCP congestion control algorithms */ CONG_ALG_HIGHSPEED }; -#define S_CONG_CNTRL 14 -#define M_CONG_CNTRL 0x3 -#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) -#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) +#define CONG_CNTRL_S 14 +#define CONG_CNTRL_M 0x3 +#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) +#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) #define CONG_CNTRL_VALID (1 << 18) diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 4977082e081f..33c45dfcbd88 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name) } spin_lock(&tmp->d_lock); - if (!(d_unhashed(tmp) && tmp->d_inode)) { + if (!d_unhashed(tmp) && tmp->d_inode) { dget_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 6559af60bffd..e08db7020cd4 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *); /* clean up any chip type-specific stuff */ void ipath_chip_done(void); -/* check to see if we have to force ordering for write combining */ -int ipath_unordered_wc(void); - void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first, unsigned cnt); void ipath_cancel_sends(struct ipath_devdata *, int); diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c index 1d7bd82a1fb1..1a7e20a75149 100644 --- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c +++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c @@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd) { return 0; } - -/** - * ipath_unordered_wc - indicate whether write combining is unordered - * - * Because our performance depends on our ability to do write - * combining mmio writes in the most efficient way, we need to - * know if we are on a processor that may reorder stores when - * write combining. - */ -int ipath_unordered_wc(void) -{ - return 1; -} diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c index 3428acb0868c..4ad0b932df1f 100644 --- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c +++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c @@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd) dd->ipath_wc_cookie = 0; /* even on failure */ } } - -/** - * ipath_unordered_wc - indicate whether write combining is ordered - * - * Because our performance depends on our ability to do write combining mmio - * writes in the most efficient way, we need to know if we are on an Intel - * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in - * the order completed, and so no special flushing is required to get - * correct ordering. Intel processors, however, will flush write buffers - * out in "random" orders, and so explicit ordering is needed at times. 
- */ -int ipath_unordered_wc(void) -{ - return boot_cpu_data.x86_vendor != X86_VENDOR_AMD; -} diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 2d8c3397774f..f50a546224ad 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -36,6 +36,7 @@ #include <linux/slab.h> #include <linux/inet.h> #include <linux/string.h> +#include <linux/mlx4/driver.h> #include "mlx4_ib.h" diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 0eb141c41416..a31e031afd87 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, continue; slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; - if (slave_id >= dev->dev->num_vfs + 1) + if (slave_id >= dev->dev->persist->num_vfs + 1) return; tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; form_cache_ag = get_cached_alias_guid(dev, port_num, diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index 56a593e0ae5d..39a488889fc7 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); if (*slave < 0) { mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n", - gid.global.interface_id); + be64_to_cpu(gid.global.interface_id)); return -ENOENT; } return 0; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index a3b70f6c4035..0176caa5792c 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector spin_lock_init(&cq->lock); cq->resize_buf = NULL; cq->resize_umem = NULL; + INIT_LIST_HEAD(&cq->send_qp_list); + INIT_LIST_HEAD(&cq->recv_qp_list); if (context) { struct mlx4_ib_create_cq ucmd; @@ -367,8 +369,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) int err; mutex_lock(&cq->resize_mutex); - - if (entries < 1) { + if (entries < 1 || entries > dev->dev->caps.max_cqes) { err = -EINVAL; goto out; } @@ -379,7 +380,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) goto out; } - if (entries > dev->dev->caps.max_cqes) { + if (entries > dev->dev->caps.max_cqes + 1) { err = -EINVAL; goto out; } @@ -392,7 +393,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) /* Can't be smaller than the number of outstanding CQEs */ outst_cqe = mlx4_ib_get_outstanding_cqes(cq); if (entries < outst_cqe + 1) { - err = 0; + err = -EINVAL; goto out; } @@ -594,6 +595,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct return 0; } +static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, + struct ib_wc *wc, int *npolled, int is_send) +{ + struct mlx4_ib_wq *wq; + unsigned cur; + int i; + + wq = is_send ? 
&qp->sq : &qp->rq; + cur = wq->head - wq->tail; + + if (cur == 0) + return; + + for (i = 0; i < cur && *npolled < num_entries; i++) { + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + wc->status = IB_WC_WR_FLUSH_ERR; + wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR; + wq->tail++; + (*npolled)++; + wc->qp = &qp->ibqp; + wc++; + } +} + +static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, + struct ib_wc *wc, int *npolled) +{ + struct mlx4_ib_qp *qp; + + *npolled = 0; + /* Find uncompleted WQEs belonging to that cq and retrun + * simulated FLUSH_ERR completions + */ + list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { + mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1); + if (*npolled >= num_entries) + goto out; + } + + list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) { + mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0); + if (*npolled >= num_entries) + goto out; + } + +out: + return; +} + static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, struct mlx4_ib_qp **cur_qp, struct ib_wc *wc) @@ -836,8 +886,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) unsigned long flags; int npolled; int err = 0; + struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); spin_lock_irqsave(&cq->lock, flags); + if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled); + goto out; + } for (npolled = 0; npolled < num_entries; ++npolled) { err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled); @@ -847,6 +902,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) mlx4_cq_set_ci(&cq->mcq); +out: spin_unlock_irqrestore(&cq->lock, flags); if (err == 0 || err == -EAGAIN) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 82a7dd87089b..c7619716c31d 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, ctx->ib_dev = &dev->ib_dev; for (i = 0; - i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1)); + i < min(dev->dev->caps.sqp_demux, + (u16)(dev->dev->persist->num_vfs + 1)); i++) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev->dev, i); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 9117b7a2d5f8..ac6e2b710ea6 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; - props->vendor_part_id = dev->dev->pdev->device; + props->vendor_part_id = dev->dev->persist->pdev->device; props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); @@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, enum ib_mtu tmp; struct mlx4_cmd_mailbox *mailbox; int err = 0; + int is_bonded = mlx4_is_bonded(mdev->dev); mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) @@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->state = IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); props->active_mtu = IB_MTU_256; + if (is_bonded) + rtnl_lock(); /* required to get upper dev */ spin_lock_bh(&iboe->lock); ndev = iboe->netdevs[port - 1]; + if (ndev && is_bonded) + ndev = netdev_master_upper_dev_get(ndev); if (!ndev) goto out_unlock; @@ 
-387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->phys_state = state_to_phys_state(props->state); out_unlock: spin_unlock_bh(&iboe->lock); + if (is_bonded) + rtnl_unlock(); out: mlx4_free_cmd_mailbox(mdev->dev, mailbox); return err; @@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, struct mlx4_ib_steering { struct list_head list; - u64 reg_id; + struct mlx4_flow_reg_id reg_id; union ib_gid gid; }; @@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain) { - int err = 0, i = 0; + int err = 0, i = 0, j = 0; struct mlx4_ib_flow *mflow; enum mlx4_net_trans_promisc_mode type[2]; + struct mlx4_dev *dev = (to_mdev(qp->device))->dev; + int is_bonded = mlx4_is_bonded(dev); memset(type, 0, sizeof(type)); @@ -1172,26 +1181,58 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, while (i < ARRAY_SIZE(type) && type[i]) { err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], - &mflow->reg_id[i]); + &mflow->reg_id[i].id); if (err) goto err_create_flow; i++; + if (is_bonded) { + /* Application always sees one port so the mirror rule + * must be on port #2 + */ + flow_attr->port = 2; + err = __mlx4_ib_create_flow(qp, flow_attr, + domain, type[j], + &mflow->reg_id[j].mirror); + flow_attr->port = 1; + if (err) + goto err_create_flow; + j++; + } + } if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { - err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); + err = mlx4_ib_tunnel_steer_add(qp, flow_attr, + &mflow->reg_id[i].id); if (err) goto err_create_flow; i++; + if (is_bonded) { + flow_attr->port = 2; + err = mlx4_ib_tunnel_steer_add(qp, flow_attr, + &mflow->reg_id[j].mirror); + flow_attr->port = 1; + if (err) + goto err_create_flow; + j++; + } + /* function to create mirror rule */ } return &mflow->ibflow; err_create_flow: while (i) { - (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]); + (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, + mflow->reg_id[i].id); i--; } + + while (j) { + (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, + mflow->reg_id[j].mirror); + j--; + } err_free: kfree(mflow); return ERR_PTR(err); @@ -1204,10 +1245,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id) struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); struct mlx4_ib_flow *mflow = to_mflow(flow_id); - while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) { - err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]); + while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) { + err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id); if (err) ret = err; + if (mflow->reg_id[i].mirror) { + err = __mlx4_ib_destroy_flow(mdev->dev, + mflow->reg_id[i].mirror); + if (err) + ret = err; + } i++; } @@ -1219,11 +1266,11 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); + struct mlx4_dev *dev = mdev->dev; struct mlx4_ib_qp *mqp = to_mqp(ibqp); - u64 reg_id; struct mlx4_ib_steering *ib_steering = NULL; - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 
- MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; + struct mlx4_flow_reg_id reg_id; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { @@ -1235,9 +1282,22 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), - prot, ®_id); - if (err) + prot, ®_id.id); + if (err) { + pr_err("multicast attach op failed, err %d\n", err); goto err_malloc; + } + + reg_id.mirror = 0; + if (mlx4_is_bonded(dev)) { + err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, + (mqp->port == 1) ? 2 : 1, + !!(mqp->flags & + MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), + prot, ®_id.mirror); + if (err) + goto err_add; + } err = add_gid_entry(ibqp, gid); if (err) @@ -1254,7 +1314,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) err_add: mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, - prot, reg_id); + prot, reg_id.id); + if (reg_id.mirror) + mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, + prot, reg_id.mirror); err_malloc: kfree(ib_steering); @@ -1281,12 +1344,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); + struct mlx4_dev *dev = mdev->dev; struct mlx4_ib_qp *mqp = to_mqp(ibqp); struct net_device *ndev; struct mlx4_ib_gid_entry *ge; - u64 reg_id = 0; - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? - MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; + struct mlx4_flow_reg_id reg_id = {0, 0}; + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { @@ -1309,10 +1372,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) } err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, - prot, reg_id); + prot, reg_id.id); if (err) return err; + if (mlx4_is_bonded(dev)) { + err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, + prot, reg_id.mirror); + if (err) + return err; + } + mutex_lock(&mqp->mutex); ge = find_gid_entry(mqp, gid->raw); if (ge) { @@ -1376,7 +1446,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr, { struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev, ib_dev.dev); - return sprintf(buf, "MT%d\n", dev->dev->pdev->device); + return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device); } static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, @@ -1440,6 +1510,7 @@ static void update_gids_task(struct work_struct *work) union ib_gid *gids; int err; struct mlx4_dev *dev = gw->dev->dev; + int is_bonded = mlx4_is_bonded(dev); if (!gw->dev->ib_active) return; @@ -1459,7 +1530,10 @@ static void update_gids_task(struct work_struct *work) if (err) pr_warn("set port command failed\n"); else - mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); + if ((gw->port == 1) || !is_bonded) + mlx4_ib_dispatch_event(gw->dev, + is_bonded ? 1 : gw->port, + IB_EVENT_GID_CHANGE); mlx4_free_cmd_mailbox(dev, mailbox); kfree(gw); @@ -1875,7 +1949,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, * don't want the bond IP based gids in the table since * flows that select port by gid may get the down port. 
*/ - if (port_state == IB_PORT_DOWN) { + if (port_state == IB_PORT_DOWN && + !mlx4_is_bonded(ibdev->dev)) { reset_gid_table(ibdev, port); mlx4_ib_set_default_gid(ibdev, curr_netdev, @@ -1938,7 +2013,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev) int i; if (mlx4_is_master(ibdev->dev)) { - for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) { + for (slave = 0; slave <= ibdev->dev->persist->num_vfs; + ++slave) { for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { for (i = 0; i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; @@ -1995,7 +2071,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { for (j = 0; j < eq_per_port; j++) { snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", - i, j, dev->pdev->bus->name); + i, j, dev->persist->pdev->bus->name); /* Set IRQ for specific name (per ring) */ if (mlx4_assign_eq(dev, name, NULL, &ibdev->eq_table[eq])) { @@ -2046,6 +2122,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) int err; struct mlx4_ib_iboe *iboe; int ib_num_ports = 0; + int num_req_counters; pr_info_once("%s", mlx4_ib_version); @@ -2059,7 +2136,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); if (!ibdev) { - dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); + dev_err(&dev->persist->pdev->dev, + "Device struct alloc failed\n"); return NULL; } @@ -2078,15 +2156,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); ibdev->dev = dev; + ibdev->bond_next_port = 0; strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); ibdev->ib_dev.owner = THIS_MODULE; ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; ibdev->num_ports = num_ports; - ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; + ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? + 1 : ibdev->num_ports; ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; - ibdev->ib_dev.dma_device = &dev->pdev->dev; + ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; if (dev->caps.userspace_caps) ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; @@ -2205,7 +2285,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (init_node_data(ibdev)) goto err_map; - for (i = 0; i < ibdev->num_ports; ++i) { + num_req_counters = mlx4_is_bonded(dev) ? 
1 : ibdev->num_ports; + for (i = 0; i < num_req_counters; ++i) { mutex_init(&ibdev->qp1_proxy_lock[i]); if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == IB_LINK_LAYER_ETHERNET) { @@ -2216,12 +2297,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->counters[i] = -1; } } + if (mlx4_is_bonded(dev)) + for (i = 1; i < ibdev->num_ports ; ++i) + ibdev->counters[i] = ibdev->counters[0]; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) ib_num_ports++; spin_lock_init(&ibdev->sm_lock); mutex_init(&ibdev->cap_mask_mutex); + INIT_LIST_HEAD(&ibdev->qp_list); + spin_lock_init(&ibdev->reset_flow_resource_lock); if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && ib_num_ports) { @@ -2237,7 +2324,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) sizeof(long), GFP_KERNEL); if (!ibdev->ib_uc_qpns_bitmap) { - dev_err(&dev->pdev->dev, "bit map alloc failed\n"); + dev_err(&dev->persist->pdev->dev, + "bit map alloc failed\n"); goto err_steer_qp_release; } @@ -2535,6 +2623,99 @@ out: return; } +static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev) +{ + struct mlx4_ib_qp *mqp; + unsigned long flags_qp; + unsigned long flags_cq; + struct mlx4_ib_cq *send_mcq, *recv_mcq; + struct list_head cq_notify_list; + struct mlx4_cq *mcq; + unsigned long flags; + + pr_warn("mlx4_ib_handle_catas_error was started\n"); + INIT_LIST_HEAD(&cq_notify_list); + + /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ + spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); + + list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { + spin_lock_irqsave(&mqp->sq.lock, flags_qp); + if (mqp->sq.tail != mqp->sq.head) { + send_mcq = to_mcq(mqp->ibqp.send_cq); + spin_lock_irqsave(&send_mcq->lock, flags_cq); + if (send_mcq->mcq.comp && + mqp->ibqp.send_cq->comp_handler) { + if (!send_mcq->mcq.reset_notify_added) { + send_mcq->mcq.reset_notify_added = 1; + list_add_tail(&send_mcq->mcq.reset_notify, + &cq_notify_list); + } + } + spin_unlock_irqrestore(&send_mcq->lock, flags_cq); + } + spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); + /* Now, handle the QP's receive queue */ + spin_lock_irqsave(&mqp->rq.lock, flags_qp); + /* no handling is needed for SRQ */ + if (!mqp->ibqp.srq) { + if (mqp->rq.tail != mqp->rq.head) { + recv_mcq = to_mcq(mqp->ibqp.recv_cq); + spin_lock_irqsave(&recv_mcq->lock, flags_cq); + if (recv_mcq->mcq.comp && + mqp->ibqp.recv_cq->comp_handler) { + if (!recv_mcq->mcq.reset_notify_added) { + recv_mcq->mcq.reset_notify_added = 1; + list_add_tail(&recv_mcq->mcq.reset_notify, + &cq_notify_list); + } + } + spin_unlock_irqrestore(&recv_mcq->lock, + flags_cq); + } + } + spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); + } + + list_for_each_entry(mcq, &cq_notify_list, reset_notify) { + mcq->comp(mcq); + } + spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); + pr_warn("mlx4_ib_handle_catas_error ended\n"); +} + +static void handle_bonded_port_state_event(struct work_struct *work) +{ + struct ib_event_work *ew = + container_of(work, struct ib_event_work, work); + struct mlx4_ib_dev *ibdev = ew->ib_dev; + enum ib_port_state bonded_port_state = IB_PORT_NOP; + int i; + struct ib_event ibev; + + kfree(ew); + spin_lock_bh(&ibdev->iboe.lock); + for (i = 0; i < MLX4_MAX_PORTS; ++i) { + struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; + + enum ib_port_state curr_port_state = + (netif_running(curr_netdev) && + netif_carrier_ok(curr_netdev)) ? + IB_PORT_ACTIVE : IB_PORT_DOWN; + + bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ? 
+ curr_port_state : IB_PORT_ACTIVE; + } + spin_unlock_bh(&ibdev->iboe.lock); + + ibev.device = &ibdev->ib_dev; + ibev.element.port_num = 1; + ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ? + IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + + ib_dispatch_event(&ibev); +} + static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, enum mlx4_dev_event event, unsigned long param) { @@ -2544,6 +2725,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, struct ib_event_work *ew; int p = 0; + if (mlx4_is_bonded(dev) && + ((event == MLX4_DEV_EVENT_PORT_UP) || + (event == MLX4_DEV_EVENT_PORT_DOWN))) { + ew = kmalloc(sizeof(*ew), GFP_ATOMIC); + if (!ew) + return; + INIT_WORK(&ew->work, handle_bonded_port_state_event); + ew->ib_dev = ibdev; + queue_work(wq, &ew->work); + return; + } + if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) eqe = (struct mlx4_eqe *)param; else @@ -2570,6 +2763,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: ibdev->ib_active = false; ibev.event = IB_EVENT_DEVICE_FATAL; + mlx4_ib_handle_catas_error(ibdev); break; case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: @@ -2604,7 +2798,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, } ibev.device = ibdev_ptr; - ibev.element.port_num = (u8) p; + ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; ib_dispatch_event(&ibev); } @@ -2613,7 +2807,8 @@ static struct mlx4_interface mlx4_ib_interface = { .add = mlx4_ib_add, .remove = mlx4_ib_remove, .event = mlx4_ib_event, - .protocol = MLX4_PROT_IB_IPV6 + .protocol = MLX4_PROT_IB_IPV6, + .flags = MLX4_INTFF_BONDING }; static int __init mlx4_ib_init(void) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6eb743f65f6f..f829fd935b79 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -110,6 +110,9 @@ struct mlx4_ib_cq { struct mutex resize_mutex; struct ib_umem *umem; struct ib_umem *resize_umem; + /* List of qps that it serves.*/ + struct list_head send_qp_list; + struct list_head recv_qp_list; }; struct mlx4_ib_mr { @@ -134,10 +137,17 @@ struct mlx4_ib_fmr { struct mlx4_fmr mfmr; }; +#define MAX_REGS_PER_FLOW 2 + +struct mlx4_flow_reg_id { + u64 id; + u64 mirror; +}; + struct mlx4_ib_flow { struct ib_flow ibflow; /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */ - u64 reg_id[2]; + struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW]; }; struct mlx4_ib_wq { @@ -293,6 +303,9 @@ struct mlx4_ib_qp { struct mlx4_roce_smac_vlan_info pri; struct mlx4_roce_smac_vlan_info alt; u64 reg_id; + struct list_head qps_list; + struct list_head cq_recv_list; + struct list_head cq_send_list; }; struct mlx4_ib_srq { @@ -527,6 +540,10 @@ struct mlx4_ib_dev { struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; /* lock when destroying qp1_proxy and getting netdev events */ struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; + u8 bond_next_port; + /* protect resources needed as part of reset flow */ + spinlock_t reset_flow_resource_lock; + struct list_head qp_list; }; struct ib_event_work { @@ -622,6 +639,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah) return container_of(ibah, struct mlx4_ib_ah, ibah); } +static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev) +{ + dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports; + + return dev->bond_next_port + 1; +} + int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev); void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev); diff --git 
a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index c36ccbd9a644..e0d271782d0a 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device if (!mfrpl->ibfrpl.page_list) goto err_free; - mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, + mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist-> + pdev->dev, size, &mfrpl->map, GFP_KERNEL); if (!mfrpl->mapped_page_list) @@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); int size = page_list->max_page_list_len * sizeof (u64); - dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list, + dma_free_coherent(&dev->dev->persist->pdev->dev, size, + mfrpl->mapped_page_list, mfrpl->map); kfree(mfrpl->ibfrpl.page_list); kfree(mfrpl); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index cf000b7ad64f..ed2bd6701f9b 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -40,11 +40,17 @@ #include <rdma/ib_addr.h> #include <rdma/ib_mad.h> +#include <linux/mlx4/driver.h> #include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include "user.h" +static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, + struct mlx4_ib_cq *recv_cq); +static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, + struct mlx4_ib_cq *recv_cq); + enum { MLX4_IB_ACK_REQ_FREQ = 8, }; @@ -93,17 +99,6 @@ enum { #ifndef ETH_ALEN #define ETH_ALEN 6 #endif -static inline u64 mlx4_mac_to_u64(u8 *addr) -{ - u64 mac = 0; - int i; - - for (i = 0; i < ETH_ALEN; i++) { - mac <<= 8; - mac |= addr[i]; - } - return mac; -} static const __be32 mlx4_ib_opcode[] = { [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), @@ -628,6 +623,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct mlx4_ib_sqp *sqp; struct mlx4_ib_qp *qp; enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; + struct mlx4_ib_cq *mcq; + unsigned long flags; /* When tunneling special qps, we use a plain UD qp */ if (sqpn) { @@ -838,6 +835,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, qp->mqp.event = mlx4_ib_qp_event; if (!*caller_qp) *caller_qp = qp; + + spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); + mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq)); + /* Maintain device to QPs access, needed for further handling + * via reset flow + */ + list_add_tail(&qp->qps_list, &dev->qp_list); + /* Maintain CQ to QPs access, needed for further handling + * via reset flow + */ + mcq = to_mcq(init_attr->send_cq); + list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); + mcq = to_mcq(init_attr->recv_cq); + list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); + mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq)); + spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); return 0; err_qpn: @@ -896,13 +911,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { if (send_cq == recv_cq) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { - spin_lock_irq(&recv_cq->lock); 
+ spin_lock(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } @@ -912,13 +927,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re { if (send_cq == recv_cq) { __release(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else { spin_unlock(&send_cq->lock); - spin_unlock_irq(&recv_cq->lock); + spin_unlock(&recv_cq->lock); } } @@ -963,6 +978,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, int is_user) { struct mlx4_ib_cq *send_cq, *recv_cq; + unsigned long flags; if (qp->state != IB_QPS_RESET) { if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), @@ -994,8 +1010,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, get_cqs(qp, &send_cq, &recv_cq); + spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); mlx4_ib_lock_cqs(send_cq, recv_cq); + /* del from lists under both locks above to protect reset flow paths */ + list_del(&qp->qps_list); + list_del(&qp->cq_send_list); + list_del(&qp->cq_recv_list); if (!is_user) { __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); @@ -1006,6 +1027,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, mlx4_qp_remove(dev->dev, &qp->mqp); mlx4_ib_unlock_cqs(send_cq, recv_cq); + spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); mlx4_qp_free(dev->dev, &qp->mqp); @@ -1674,8 +1696,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); - if (err) - return -EINVAL; + if (err) { + err = -EINVAL; + goto out; + } if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) dev->qp1_proxy[qp->port - 1] = qp; } @@ -1915,6 +1939,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; } + if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) { + if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) { + if ((ibqp->qp_type == IB_QPT_RC) || + (ibqp->qp_type == IB_QPT_UD) || + (ibqp->qp_type == IB_QPT_UC) || + (ibqp->qp_type == IB_QPT_RAW_PACKET) || + (ibqp->qp_type == IB_QPT_XRC_INI)) { + attr->port_num = mlx4_ib_bond_next_port(dev); + } + } else { + /* no sense in changing port_num + * when ports are bonded */ + attr_mask &= ~IB_QP_PORT; + } + } + if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->num_ports)) { pr_debug("qpn 0x%x: invalid port number (%d) specified " @@ -1965,6 +2005,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); + if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) + attr->port_num = 1; + out: mutex_unlock(&qp->mutex); return err; @@ -2609,8 +2652,15 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, __be32 uninitialized_var(lso_hdr_sz); __be32 blh; int i; + struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); spin_lock_irqsave(&qp->sq.lock, flags); + if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + err = -EIO; + *bad_wr = wr; + nreq = 0; + goto out; + } ind = qp->sq_next_wqe; @@ -2908,10 +2958,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, int ind; int max_gs; int i; + struct mlx4_ib_dev *mdev = 
to_mdev(ibqp->device); max_gs = qp->rq.max_gs; spin_lock_irqsave(&qp->rq.lock, flags); + if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + err = -EIO; + *bad_wr = wr; + nreq = 0; + goto out; + } + ind = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 62d9285300af..dce5dfe3a70e 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -316,8 +316,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, int err = 0; int nreq; int i; + struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device); spin_lock_irqsave(&srq->lock, flags); + if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + err = -EIO; + *bad_wr = wr; + nreq = 0; + goto out; + } for (nreq = 0; wr; ++nreq, wr = wr->next) { if (unlikely(wr->num_sge > srq->msrq.max_gs)) { @@ -362,6 +369,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, *srq->db.db = cpu_to_be32(srq->wqe_ctr); } +out: spin_unlock_irqrestore(&srq->lock, flags); diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index cb4c66e723b5..d10c2b8a5dad 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max) char base_name[9]; /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */ - strlcpy(name, pci_name(dev->dev->pdev), max); + strlcpy(name, pci_name(dev->dev->persist->pdev), max); strncpy(base_name, name, 8); /*till xxxx:yy:*/ base_name[8] = '\0'; /* with no ARI only 3 last bits are used so when the fn is higher than 8 @@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device) if (!mlx4_is_master(device->dev)) return 0; - for (i = 0; i <= device->dev->num_vfs; ++i) + for (i = 0; i <= device->dev->persist->num_vfs; ++i) register_one_pkey_tree(device, i); return 0; @@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device) if (!mlx4_is_master(device->dev)) return; - for (slave = device->dev->num_vfs; slave >= 0; --slave) { + for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) { list_for_each_entry_safe(p, t, &device->pkeys.pkey_port_list[slave], entry) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 03bf81211a54..cc4ac1e583b2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev) struct ib_device_attr *dprops = NULL; struct ib_port_attr *pprops = NULL; struct mlx5_general_caps *gen; - int err = 0; + int err = -ENOMEM; int port; gen = &dev->mdev->caps.gen; @@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | (1ull << IB_USER_VERBS_CMD_OPEN_QP); + dev->ib_dev.uverbs_ex_cmd_mask = + (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); dev->ib_dev.query_device = mlx5_ib_query_device; dev->ib_dev.query_port = mlx5_ib_query_port; diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index b56e4c5593ee..611a9fdf2f38 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, for (k = 0; k < len; k++) { if (!(i & mask)) { tmp = (unsigned long)pfn; - m = min(m, 
find_first_bit(&tmp, sizeof(tmp))); + m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp))); skip = 1 << m; mask = skip - 1; base = pfn; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 32a28bd50b20..cd9822eeacae 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, goto err_2; } mr->umem = umem; + mr->dev = dev; mr->live = 1; kvfree(in); diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 49eb5111d2cd..70acda91eb2a 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev) wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; /* setup the VLAN tag if present */ - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", - netdev->name, vlan_tx_tag_get(skb)); + netdev->name, skb_vlan_tag_get(skb)); wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; - wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); + wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb); } else wqe_misc = 0; @@ -576,11 +576,12 @@ tso_sq_no_longer_full: wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX]; /* setup the VLAN tag if present */ - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n", - netdev->name, vlan_tx_tag_get(skb) ); + netdev->name, + skb_vlan_tag_get(skb)); wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE; - wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb); + wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb); } else wqe_misc = 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index b43456ae124b..c9780d919769 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -40,7 +40,7 @@ #include <be_roce.h> #include "ocrdma_sli.h" -#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u" +#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u" #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" @@ -55,12 +55,19 @@ #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) +#define EQ_INTR_PER_SEC_THRSH_HI 150000 +#define EQ_INTR_PER_SEC_THRSH_LOW 100000 +#define EQ_AIC_MAX_EQD 20 +#define EQ_AIC_MIN_EQD 0 + +void ocrdma_eqd_set_task(struct work_struct *work); struct ocrdma_dev_attr { u8 fw_ver[32]; u32 vendor_id; u32 device_id; u16 max_pd; + u16 max_dpp_pds; u16 max_cq; u16 max_cqe; u16 max_qp; @@ -116,12 +123,19 @@ struct ocrdma_queue_info { bool created; }; +struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ + u32 prev_eqd; + u64 eq_intr_cnt; + u64 prev_eq_intr_cnt; +}; + struct ocrdma_eq { struct ocrdma_queue_info q; u32 vector; int cq_cnt; struct ocrdma_dev *dev; char irq_name[32]; + struct ocrdma_aic_obj aic_obj; }; struct ocrdma_mq { @@ -171,6 +185,21 @@ struct ocrdma_stats { struct ocrdma_dev *dev; }; +struct ocrdma_pd_resource_mgr { + u32 pd_norm_start; + u16 pd_norm_count; + u16 pd_norm_thrsh; + u16 max_normal_pd; + u32 pd_dpp_start; + u16 pd_dpp_count; + u16 pd_dpp_thrsh; + u16 max_dpp_pd; + u16 
dpp_page_index; + unsigned long *pd_norm_bitmap; + unsigned long *pd_dpp_bitmap; + bool pd_prealloc_valid; +}; + struct stats_mem { struct ocrdma_mqe mqe; void *va; @@ -198,6 +227,7 @@ struct ocrdma_dev { struct ocrdma_eq *eq_tbl; int eq_cnt; + struct delayed_work eqd_work; u16 base_eqid; u16 max_eq; @@ -255,7 +285,12 @@ struct ocrdma_dev { struct ocrdma_stats rx_qp_err_stats; struct ocrdma_stats tx_dbg_stats; struct ocrdma_stats rx_dbg_stats; + struct ocrdma_stats driver_stats; + struct ocrdma_stats reset_stats; struct dentry *dir; + atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS]; + atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR]; + struct ocrdma_pd_resource_mgr *pd_mgr; }; struct ocrdma_cq { @@ -335,7 +370,6 @@ struct ocrdma_srq { struct ocrdma_qp { struct ib_qp ibqp; - struct ocrdma_dev *dev; u8 __iomem *sq_db; struct ocrdma_qp_hwq_info sq; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index f3cc8c9e65ae..d812904f3984 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -29,19 +29,22 @@ #include <net/netevent.h> #include <rdma/ib_addr.h> +#include <rdma/ib_mad.h> #include "ocrdma.h" #include "ocrdma_verbs.h" #include "ocrdma_ah.h" #include "ocrdma_hw.h" +#include "ocrdma_stats.h" #define OCRDMA_VID_PCP_SHIFT 0xD static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, - struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) + struct ib_ah_attr *attr, union ib_gid *sgid, + int pdid, bool *isvlan) { int status = 0; - u16 vlan_tag; bool vlan_enabled = false; + u16 vlan_tag; struct ocrdma_eth_vlan eth; struct ocrdma_grh grh; int eth_sz; @@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; eth.vlan_tag = cpu_to_be16(vlan_tag); eth_sz = sizeof(struct ocrdma_eth_vlan); - vlan_enabled = true; + *isvlan = true; } else { eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); eth_sz = sizeof(struct ocrdma_eth_basic); @@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, /* Eth HDR */ memcpy(&ah->av->eth_hdr, &eth, eth_sz); memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); - if (vlan_enabled) + if (*isvlan) ah->av->valid |= OCRDMA_AV_VLAN_VALID; ah->av->valid = cpu_to_le32(ah->av->valid); return status; @@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) { u32 *ahid_addr; + bool isvlan = false; int status; struct ocrdma_ah *ah; struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); @@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) } } - status = set_av_attr(dev, ah, attr, &sgid, pd->id); + status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan); if (status) goto av_conf_err; /* if pd is for the user process, pass the ah_id to user space */ if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; - *ahid_addr = ah->id; + *ahid_addr = 0; + *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK; + if (isvlan) + *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK << + OCRDMA_AH_VLAN_VALID_SHIFT); } + return &ah->ibah; av_conf_err: @@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { - return IB_MAD_RESULT_SUCCESS; + int status; + struct ocrdma_dev *dev; + + switch (in_mad->mad_hdr.mgmt_class) { + 
case IB_MGMT_CLASS_PERF_MGMT: + dev = get_ocrdma_dev(ibdev); + if (!ocrdma_pma_counters(dev, out_mad)) + status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + else + status = IB_MAD_RESULT_SUCCESS; + break; + default: + status = IB_MAD_RESULT_SUCCESS; + break; + } + return status; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index 8ac49e7f96d1..726a87cf22dc 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -28,6 +28,12 @@ #ifndef __OCRDMA_AH_H__ #define __OCRDMA_AH_H__ +enum { + OCRDMA_AH_ID_MASK = 0x3FF, + OCRDMA_AH_VLAN_VALID_MASK = 0x01, + OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F +}; + struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); int ocrdma_destroy_ah(struct ib_ah *); int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 638bff1ffc6c..0c9e95909a64 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, break; } + if (type < OCRDMA_MAX_ASYNC_ERRORS) + atomic_inc(&dev->async_err_stats[type]); + if (qp_event) { if (qp->ibqp.event_handler) qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); @@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) return 0; } -static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, - struct ocrdma_cq *cq) +static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, + struct ocrdma_cq *cq, bool sq) { - unsigned long flags; struct ocrdma_qp *qp; - bool buddy_cq_found = false; - /* Go through list of QPs in error state which are using this CQ - * and invoke its callback handler to trigger CQE processing for - * error/flushed CQE. It is rare to find more than few entries in - * this list as most consumers stops after getting error CQE. - * List is traversed only once when a matching buddy cq found for a QP. - */ - spin_lock_irqsave(&dev->flush_q_lock, flags); - list_for_each_entry(qp, &cq->sq_head, sq_entry) { + struct list_head *cur; + struct ocrdma_cq *bcq = NULL; + struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); + + list_for_each(cur, head) { + if (sq) + qp = list_entry(cur, struct ocrdma_qp, sq_entry); + else + qp = list_entry(cur, struct ocrdma_qp, rq_entry); + if (qp->srq) continue; /* if wq and rq share the same cq, than comp_handler @@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, * if completion came on rq, sq's cq is buddy cq. */ if (qp->sq_cq == cq) - cq = qp->rq_cq; + bcq = qp->rq_cq; else - cq = qp->sq_cq; - buddy_cq_found = true; - break; + bcq = qp->sq_cq; + return bcq; } + return NULL; +} + +static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, + struct ocrdma_cq *cq) +{ + unsigned long flags; + struct ocrdma_cq *bcq = NULL; + + /* Go through list of QPs in error state which are using this CQ + * and invoke its callback handler to trigger CQE processing for + * error/flushed CQE. It is rare to find more than few entries in + * this list as most consumers stops after getting error CQE. + * List is traversed only once when a matching buddy cq found for a QP. + */ + spin_lock_irqsave(&dev->flush_q_lock, flags); + /* Check if buddy CQ is present. 
+ * true - Check for SQ CQ + * false - Check for RQ CQ + */ + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true); + if (bcq == NULL) + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false); spin_unlock_irqrestore(&dev->flush_q_lock, flags); - if (buddy_cq_found == false) - return; - if (cq->ibcq.comp_handler) { - spin_lock_irqsave(&cq->comp_handler_lock, flags); - (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); + + /* if there is valid buddy cq, look for its completion handler */ + if (bcq && bcq->ibcq.comp_handler) { + spin_lock_irqsave(&bcq->comp_handler_lock, flags); + (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context); + spin_unlock_irqrestore(&bcq->comp_handler_lock, flags); } } @@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle) } while (budget); + eq->aic_obj.eq_intr_cnt++; ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); return IRQ_HANDLED; } @@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, attr->max_pd = (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; + attr->max_dpp_pds = + (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET; attr->max_qp = (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; @@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) return status; } + +static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + size_t pd_bitmap_size; + struct ocrdma_alloc_pd_range *cmd; + struct ocrdma_alloc_pd_range_rsp *rsp; + + /* Pre allocate the DPP PDs */ + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + cmd->pd_count = dev->attr.max_dpp_pds; + cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; + + if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) { + dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> + OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; + dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; + dev->pd_mgr->max_dpp_pd = rsp->pd_count; + pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); + dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, + GFP_KERNEL); + } + kfree(cmd); + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + + cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; + if (rsp->pd_count) { + dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; + dev->pd_mgr->max_normal_pd = rsp->pd_count; + pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); + dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, + GFP_KERNEL); + } + + if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) { + /* Enable PD resource manager */ + dev->pd_mgr->pd_prealloc_valid = true; + } else { + return -ENOMEM; + } +mbx_err: + kfree(cmd); + return status; +} + +static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev) +{ + struct ocrdma_dealloc_pd_range *cmd; + + /* return normal PDs to firmware */ + cmd = 
ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + goto mbx_err; + + if (dev->pd_mgr->max_normal_pd) { + cmd->start_pd_id = dev->pd_mgr->pd_norm_start; + cmd->pd_count = dev->pd_mgr->max_normal_pd; + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + } + + if (dev->pd_mgr->max_dpp_pd) { + kfree(cmd); + /* return DPP PDs to firmware */ + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, + sizeof(*cmd)); + if (!cmd) + goto mbx_err; + + cmd->start_pd_id = dev->pd_mgr->pd_dpp_start; + cmd->pd_count = dev->pd_mgr->max_dpp_pd; + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + } +mbx_err: + kfree(cmd); +} + +void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) +{ + int status; + + dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr), + GFP_KERNEL); + if (!dev->pd_mgr) { + pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id); + return; + } + status = ocrdma_mbx_alloc_pd_range(dev); + if (status) { + pr_err("%s(%d) Unable to initialize PD pool, using default.\n", + __func__, dev->id); + } +} + +static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) +{ + ocrdma_mbx_dealloc_pd_range(dev); + kfree(dev->pd_mgr->pd_norm_bitmap); + kfree(dev->pd_mgr->pd_dpp_bitmap); + kfree(dev->pd_mgr); +} + static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, int *num_pages, int *page_size) { @@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp) { bool found; unsigned long flags; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); - spin_lock_irqsave(&qp->dev->flush_q_lock, flags); + spin_lock_irqsave(&dev->flush_q_lock, flags); found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); if (!found) list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); @@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp) if (!found) list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); } - spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags); + spin_unlock_irqrestore(&dev->flush_q_lock, flags); } static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) @@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, int status; u32 len, hw_pages, hw_page_size; dma_addr_t pa; - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; u32 max_wqe_allocated; u32 max_sges = attrs->cap.max_send_sge; @@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, int status; u32 len, hw_pages, hw_page_size; dma_addr_t pa = 0; - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; @@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd, static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, struct ocrdma_qp *qp) { - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; dma_addr_t pa = 0; int ird_page_size = dev->attr.ird_page_size; @@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, { int status = -ENOMEM; u32 flags = 0; - struct ocrdma_dev *dev = qp->dev; struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; struct ocrdma_cq *cq; 
struct ocrdma_create_qp_req *cmd; @@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, union ib_gid sgid, zgid; u32 vlan_id; u8 mac_addr[6]; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); if ((ah_attr->ah_flags & IB_AH_GRH) == 0) return -EINVAL; - if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0)) - ocrdma_init_service_level(qp->dev); + if (atomic_cmpxchg(&dev->update_sl, 1, 0)) + ocrdma_init_service_level(dev); cmd->params.tclass_sq_psn |= (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); cmd->params.rnt_rc_sl_fl |= @@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], sizeof(cmd->params.dgid)); - status = ocrdma_query_gid(&qp->dev->ibdev, 1, + status = ocrdma_query_gid(&dev->ibdev, 1, ah_attr->grh.sgid_index, &sgid); if (status) return status; @@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, qp->sgid_idx = ah_attr->grh.sgid_index; memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); - ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); + status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]); + if (status) + return status; cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | (mac_addr[2] << 16) | (mac_addr[3] << 24); /* convert them to LE format. */ @@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; cmd->params.rnt_rc_sl_fl |= - (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; + (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; } return 0; } @@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, struct ib_qp_attr *attrs, int attr_mask) { int status = 0; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); if (attr_mask & IB_QP_PKEY_INDEX) { cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index & @@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, return status; } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { /* set the default mac address for UD, GSI QPs */ - cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] | - (qp->dev->nic_info.mac_addr[1] << 8) | - (qp->dev->nic_info.mac_addr[2] << 16) | - (qp->dev->nic_info.mac_addr[3] << 24); - cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] | - (qp->dev->nic_info.mac_addr[5] << 8); + cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] | + (dev->nic_info.mac_addr[1] << 8) | + (dev->nic_info.mac_addr[2] << 16) | + (dev->nic_info.mac_addr[3] << 24); + cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] | + (dev->nic_info.mac_addr[5] << 8); } if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) && attrs->en_sqd_async_notify) { @@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { - if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) { + if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) { status = -EINVAL; goto pmtu_err; } @@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { - if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) { + if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) { status = -EINVAL; goto pmtu_err; } @@ -2870,6 +3023,82 @@ done: return status; } +static int 
ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, + int num) +{ + int i, status = -ENOMEM; + struct ocrdma_modify_eqd_req *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd)); + if (!cmd) + return status; + + ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + + cmd->cmd.num_eq = num; + for (i = 0; i < num; i++) { + cmd->cmd.set_eqd[i].eq_id = eq[i].q.id; + cmd->cmd.set_eqd[i].phase = 0; + cmd->cmd.set_eqd[i].delay_multiplier = + (eq[i].aic_obj.prev_eqd * 65)/100; + } + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; +mbx_err: + kfree(cmd); + return status; +} + +static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, + int num) +{ + int num_eqs, i = 0; + if (num > 8) { + while (num) { + num_eqs = min(num, 8); + ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs); + i += num_eqs; + num -= num_eqs; + } + } else { + ocrdma_mbx_modify_eqd(dev, eq, num); + } + return 0; +} + +void ocrdma_eqd_set_task(struct work_struct *work) +{ + struct ocrdma_dev *dev = + container_of(work, struct ocrdma_dev, eqd_work.work); + struct ocrdma_eq *eq = 0; + int i, num = 0, status = -EINVAL; + u64 eq_intr; + + for (i = 0; i < dev->eq_cnt; i++) { + eq = &dev->eq_tbl[i]; + if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) { + eq_intr = eq->aic_obj.eq_intr_cnt - + eq->aic_obj.prev_eq_intr_cnt; + if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) && + (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) { + eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD; + num++; + } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) && + (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) { + eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD; + num++; + } + } + eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt; + } + + if (num) + status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num); + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); +} + int ocrdma_init_hw(struct ocrdma_dev *dev) { int status; @@ -2915,6 +3144,7 @@ qpeq_err: void ocrdma_cleanup_hw(struct ocrdma_dev *dev) { + ocrdma_free_pd_pool(dev); ocrdma_mbx_delete_ah_tbl(dev); /* cleanup the eqs */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index 6eed8f191322..e905972fceb7 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); char *port_speed_string(struct ocrdma_dev *dev); void ocrdma_init_service_level(struct ocrdma_dev *); +void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); +void ocrdma_free_pd_range(struct ocrdma_dev *dev); #endif /* __OCRDMA_HW_H__ */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index b0b2257b8e04..7a2b59aca004 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.node_type = RDMA_NODE_IB_CA; dev->ibdev.phys_port_cnt = 1; - dev->ibdev.num_comp_vectors = 1; + dev->ibdev.num_comp_vectors = dev->eq_cnt; /* mandatory verbs. 
*/ dev->ibdev.query_device = ocrdma_query_device; @@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) if (dev->stag_arr == NULL) goto alloc_err; + ocrdma_alloc_pd_pool(dev); + spin_lock_init(&dev->av_tbl.lock); spin_lock_init(&dev->flush_q_lock); return 0; @@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) spin_unlock(&ocrdma_devlist_lock); /* Init stats */ ocrdma_add_port_stats(dev); + /* Interrupt Moderation */ + INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task); + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); pr_info("%s %s: %s \"%s\" port %d\n", dev_name(&dev->nic_info.pdev->dev), hca_name(dev), @@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev) /* first unregister with stack to stop all the active traffic * of the registered clients. */ - ocrdma_rem_port_stats(dev); + cancel_delayed_work_sync(&dev->eqd_work); ocrdma_remove_sysfiles(dev); - ib_unregister_device(&dev->ibdev); + ocrdma_rem_port_stats(dev); + spin_lock(&ocrdma_devlist_lock); list_del_rcu(&dev->entry); spin_unlock(&ocrdma_devlist_lock); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 4e036480c1a8..243c87c8bd65 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -75,6 +75,8 @@ enum { OCRDMA_CMD_DESTROY_RBQ = 26, OCRDMA_CMD_GET_RDMA_STATS = 27, + OCRDMA_CMD_ALLOC_PD_RANGE = 28, + OCRDMA_CMD_DEALLOC_PD_RANGE = 29, OCRDMA_CMD_MAX }; @@ -87,6 +89,7 @@ enum { OCRDMA_CMD_CREATE_MQ = 21, OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, OCRDMA_CMD_GET_FW_VER = 35, + OCRDMA_CMD_MODIFY_EQ_DELAY = 41, OCRDMA_CMD_DELETE_MQ = 53, OCRDMA_CMD_DELETE_CQ = 54, OCRDMA_CMD_DELETE_EQ = 55, @@ -101,7 +104,7 @@ enum { QTYPE_MCCQ = 3 }; -#define OCRDMA_MAX_SGID 8 +#define OCRDMA_MAX_SGID 16 #define OCRDMA_MAX_QP 2048 #define OCRDMA_MAX_CQ 2048 @@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp { #define OCRDMA_EQ_MINOR_OTHER 0x1 +struct ocrmda_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +}; + +struct ocrdma_modify_eqd_cmd { + struct ocrdma_mbx_hdr req; + u32 num_eq; + struct ocrmda_set_eqd set_eqd[8]; +} __packed; + +struct ocrdma_modify_eqd_req { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_modify_eqd_cmd cmd; +}; + + +struct ocrdma_modify_eq_delay_rsp { + struct ocrdma_mbx_rsp hdr; + u32 rsvd0; +} __packed; + enum { OCRDMA_MCQE_STATUS_SHIFT = 0, OCRDMA_MCQE_STATUS_MASK = 0xFFFF, @@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE { OCRDMA_DEVICE_FATAL_EVENT = 0x08, OCRDMA_SRQCAT_ERROR = 0x0E, OCRDMA_SRQ_LIMIT_EVENT = 0x0F, - OCRDMA_QP_LAST_WQE_EVENT = 0x10 + OCRDMA_QP_LAST_WQE_EVENT = 0x10, + + OCRDMA_MAX_ASYNC_ERRORS }; /* mailbox command request and responses */ @@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp { struct ocrdma_mbx_rsp rsp; }; +struct ocrdma_alloc_pd_range { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 enable_dpp_rsvd; + u32 pd_count; +}; + +struct ocrdma_alloc_pd_range_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + u32 dpp_page_pdid; + u32 pd_count; +}; + +enum { + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF, +}; + +struct ocrdma_dealloc_pd_range { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 start_pd_id; + u32 pd_count; +}; + +struct ocrdma_dealloc_pd_range_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 rsvd; +}; + enum { OCRDMA_ADDR_CHECK_ENABLE = 1, OCRDMA_ADDR_CHECK_DISABLE = 0 @@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS { 
OCRDMA_CQE_INV_EEC_STATE_ERR, OCRDMA_CQE_FATAL_ERR, OCRDMA_CQE_RESP_TIMEOUT_ERR, - OCRDMA_CQE_GENERAL_ERR + OCRDMA_CQE_GENERAL_ERR, + + OCRDMA_MAX_CQE_ERR }; enum { @@ -1673,6 +1734,7 @@ enum { OCRDMA_FLAG_FENCE_R = 0x8, OCRDMA_FLAG_SOLICIT = 0x10, OCRDMA_FLAG_IMM = 0x20, + OCRDMA_FLAG_AH_VLAN_PR = 0x40, /* Stag flags */ OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 41a9aec9998d..48d7ef51aa0c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -26,6 +26,7 @@ *******************************************************************/ #include <rdma/ib_addr.h> +#include <rdma/ib_pma.h> #include "ocrdma_stats.h" static struct dentry *ocrdma_dbgfs_dir; @@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev) return stats; } +static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; + + return convert_to_64bit(rx_stats->roce_frames_lo, + rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops + + (u64)rx_stats->roce_frame_payload_len_drops; +} + +static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; + + return (convert_to_64bit(rx_stats->roce_frame_bytes_lo, + rx_stats->roce_frame_bytes_hi))/4; +} + static char *ocrdma_tx_stats(struct ocrdma_dev *dev) { char *stats = dev->stats_mem.debugfs_mem, *pcur; @@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev) return stats; } +static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; + + return (convert_to_64bit(tx_stats->send_pkts_lo, + tx_stats->send_pkts_hi) + + convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) + + convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) + + convert_to_64bit(tx_stats->read_rsp_pkts_lo, + tx_stats->read_rsp_pkts_hi) + + convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi)); +} + +static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; + + return (convert_to_64bit(tx_stats->send_bytes_lo, + tx_stats->send_bytes_hi) + + convert_to_64bit(tx_stats->write_bytes_lo, + tx_stats->write_bytes_hi) + + convert_to_64bit(tx_stats->read_req_bytes_lo, + tx_stats->read_req_bytes_hi) + + convert_to_64bit(tx_stats->read_rsp_bytes_lo, + tx_stats->read_rsp_bytes_hi))/4; +} + static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) { char *stats = dev->stats_mem.debugfs_mem, *pcur; @@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev) return dev->stats_mem.debugfs_mem; } +static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "async_cq_err", + (u64)(dev->async_err_stats + [OCRDMA_CQ_ERROR].counter)); + pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err", + 
(u64)dev->async_err_stats + [OCRDMA_CQ_OVERRUN_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err", + (u64)dev->async_err_stats + [OCRDMA_CQ_QPCAT_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err", + (u64)dev->async_err_stats + [OCRDMA_QP_ACCESS_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt", + (u64)dev->async_err_stats + [OCRDMA_QP_COMM_EST_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt", + (u64)dev->async_err_stats + [OCRDMA_SQ_DRAINED_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt", + (u64)dev->async_err_stats + [OCRDMA_DEVICE_FATAL_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err", + (u64)dev->async_err_stats + [OCRDMA_SRQCAT_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt", + (u64)dev->async_err_stats + [OCRDMA_SRQ_LIMIT_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt", + (u64)dev->async_err_stats + [OCRDMA_QP_LAST_WQE_EVENT].counter); + + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_LEN_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_QP_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_EEC_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_PROT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_WR_FLUSH_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_MW_BIND_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_BAD_RESP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_ACCESS_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_INV_REQ_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_ACCESS_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RETRY_EXC_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_ABORT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_INV_EECN_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_INV_EEC_STATE_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_FATAL_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err", + 
(u64)dev->cqe_err_stats + [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_GENERAL_ERR].counter); + return stats; +} + static void ocrdma_update_stats(struct ocrdma_dev *dev) { ulong now = jiffies, secs; int status = 0; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; if (secs) { @@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev) if (status) pr_err("%s: stats mbox failed with status = %d\n", __func__, status); + /* Update PD counters from PD resource manager */ + if (dev->pd_mgr->pd_prealloc_valid) { + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count; + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count; + /* Threshold stata*/ + rsrc_stats = &rdma_stats->th_rsrc_stats; + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh; + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh; + } dev->last_stats_time = jiffies; } } +static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + char tmp_str[32]; + long reset; + int status = 0; + struct ocrdma_stats *pstats = filp->private_data; + struct ocrdma_dev *dev = pstats->dev; + + if (count > 32) + goto err; + + if (copy_from_user(tmp_str, buffer, count)) + goto err; + + tmp_str[count-1] = '\0'; + if (kstrtol(tmp_str, 10, &reset)) + goto err; + + switch (pstats->type) { + case OCRDMA_RESET_STATS: + if (reset) { + status = ocrdma_mbx_rdma_stats(dev, true); + if (status) { + pr_err("Failed to reset stats = %d", status); + goto err; + } + } + break; + default: + goto err; + } + + return count; +err: + return -EFAULT; +} + +int ocrdma_pma_counters(struct ocrdma_dev *dev, + struct ib_mad *out_mad) +{ + struct ib_pma_portcounters *pma_cnt; + + memset(out_mad->data, 0, sizeof out_mad->data); + pma_cnt = (void *)(out_mad->data + 40); + ocrdma_update_stats(dev); + + pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev)); + pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev)); + pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev)); + pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev)); + return 0; +} + static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, size_t usr_buf_len, loff_t *ppos) { @@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, case OCRDMA_RX_DBG_STATS: data = ocrdma_rx_dbg_stats(dev); break; + case OCRDMA_DRV_STATS: + data = ocrdma_driver_dbg_stats(dev); + break; default: status = -EFAULT; @@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = { .owner = THIS_MODULE, .open = simple_open, .read = ocrdma_dbgfs_ops_read, + .write = ocrdma_dbgfs_ops_write, }; void ocrdma_add_port_stats(struct ocrdma_dev *dev) @@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev) &dev->rx_dbg_stats, &ocrdma_dbg_ops)) goto err; + dev->driver_stats.type = OCRDMA_DRV_STATS; + dev->driver_stats.dev = dev; + if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir, + &dev->driver_stats, &ocrdma_dbg_ops)) + goto err; + + dev->reset_stats.type = OCRDMA_RESET_STATS; + dev->reset_stats.dev = dev; + if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, + &dev->reset_stats, &ocrdma_dbg_ops)) + goto err; + /* Now create dma_mem for stats mbx command */ if 
(!ocrdma_alloc_stats_mem(dev)) goto err; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h index 5f5e20c46d7c..091edd68a8a3 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h @@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE { OCRDMA_RXQP_ERRSTATS, OCRDMA_TXQP_ERRSTATS, OCRDMA_TX_DBG_STATS, - OCRDMA_RX_DBG_STATS + OCRDMA_RX_DBG_STATS, + OCRDMA_DRV_STATS, + OCRDMA_RESET_STATS }; void ocrdma_rem_debugfs(void); void ocrdma_init_debugfs(void); void ocrdma_rem_port_stats(struct ocrdma_dev *dev); void ocrdma_add_port_stats(struct ocrdma_dev *dev); +int ocrdma_pma_counters(struct ocrdma_dev *dev, + struct ib_mad *out_mad); #endif /* __OCRDMA_STATS_H__ */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index fb8d8c4dfbb9..877175563634 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port, dev = get_ocrdma_dev(ibdev); memset(sgid, 0, sizeof(*sgid)); - if (index > OCRDMA_MAX_SGID) + if (index >= OCRDMA_MAX_SGID) return -EINVAL; memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); @@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, return found; } + +static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) +{ + u16 pd_bitmap_idx = 0; + const unsigned long *pd_bitmap; + + if (dpp_pool) { + pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, + dev->pd_mgr->max_dpp_pd); + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); + dev->pd_mgr->pd_dpp_count++; + if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) + dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; + } else { + pd_bitmap = dev->pd_mgr->pd_norm_bitmap; + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, + dev->pd_mgr->max_normal_pd); + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); + dev->pd_mgr->pd_norm_count++; + if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) + dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; + } + return pd_bitmap_idx; +} + +static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id, + bool dpp_pool) +{ + u16 pd_count; + u16 pd_bit_index; + + pd_count = dpp_pool ? 
dev->pd_mgr->pd_dpp_count : + dev->pd_mgr->pd_norm_count; + if (pd_count == 0) + return -EINVAL; + + if (dpp_pool) { + pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start; + if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) { + return -EINVAL; + } else { + __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap); + dev->pd_mgr->pd_dpp_count--; + } + } else { + pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start; + if (pd_bit_index >= dev->pd_mgr->max_normal_pd) { + return -EINVAL; + } else { + __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap); + dev->pd_mgr->pd_norm_count--; + } + } + + return 0; +} + +static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, + bool dpp_pool) +{ + int status; + + mutex_lock(&dev->dev_lock); + status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool); + mutex_unlock(&dev->dev_lock); + return status; +} + +static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) +{ + u16 pd_idx = 0; + int status = 0; + + mutex_lock(&dev->dev_lock); + if (pd->dpp_enabled) { + /* try allocating DPP PD, if not available then normal PD */ + if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true); + pd->id = dev->pd_mgr->pd_dpp_start + pd_idx; + pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx; + } else if (dev->pd_mgr->pd_norm_count < + dev->pd_mgr->max_normal_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; + pd->dpp_enabled = false; + } else { + status = -EINVAL; + } + } else { + if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; + } else { + status = -EINVAL; + } + } + mutex_unlock(&dev->dev_lock); + return status; +} + static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_ucontext *uctx, struct ib_udata *udata) @@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, dev->attr.wqe_size) : 0; } + if (dev->pd_mgr->pd_prealloc_valid) { + status = ocrdma_get_pd_num(dev, pd); + return (status == 0) ? 
pd : ERR_PTR(status); + } + retry: status = ocrdma_mbx_alloc_pd(dev, pd); if (status) { @@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, { int status = 0; - status = ocrdma_mbx_dealloc_pd(dev, pd); + if (dev->pd_mgr->pd_prealloc_valid) + status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); + else + status = ocrdma_mbx_dealloc_pd(dev, pd); + kfree(pd); return status; } @@ -325,7 +435,6 @@ err: static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) { - int status = 0; struct ocrdma_pd *pd = uctx->cntxt_pd; struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); @@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) __func__, dev->id, pd->id); } uctx->cntxt_pd = NULL; - status = _ocrdma_dealloc_pd(dev, pd); - return status; + (void)_ocrdma_dealloc_pd(dev, pd); + return 0; } static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) @@ -569,7 +678,7 @@ err: if (is_uctx_pd) { ocrdma_release_ucontext_pd(uctx); } else { - status = ocrdma_mbx_dealloc_pd(dev, pd); + status = _ocrdma_dealloc_pd(dev, pd); kfree(pd); } exit: @@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) { struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); - int status; - status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); + (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); @@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) /* Don't stop cleanup, in case FW is unresponsive */ if (dev->mqe_ctx.fw_error_state) { - status = 0; pr_err("%s(%d) fw not responding.\n", __func__, dev->id); } - return status; + return 0; } static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, @@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq) int ocrdma_destroy_cq(struct ib_cq *ibcq) { - int status; struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); struct ocrdma_eq *eq = NULL; struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); @@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq) synchronize_irq(irq); ocrdma_flush_cq(cq); - status = ocrdma_mbx_destroy_cq(dev, cq); + (void)ocrdma_mbx_destroy_cq(dev, cq); if (cq->ucontext) { pdid = cq->ucontext->cntxt_pd->id; ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, @@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq) } kfree(cq); - return status; + return 0; } static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) @@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, int status = 0; u64 usr_db; struct ocrdma_create_qp_uresp uresp; - struct ocrdma_dev *dev = qp->dev; struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); memset(&uresp, 0, sizeof(uresp)); usr_db = dev->nic_info.unmapped_db + @@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, status = -ENOMEM; goto gen_err; } - qp->dev = dev; ocrdma_set_qp_init_params(qp, pd, attrs); if (udata == NULL) qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | @@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, enum ib_qp_state old_qps; qp = get_ocrdma_qp(ibqp); - dev = qp->dev; + dev = get_ocrdma_dev(ibqp->device); if (attr_mask & IB_QP_STATE) status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); /* if new and previous states are same hw doesn't need to @@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct 
ib_qp_attr *attr, enum ib_qp_state old_qps, new_qps; qp = get_ocrdma_qp(ibqp); - dev = qp->dev; + dev = get_ocrdma_dev(ibqp->device); /* syncronize with multiple context trying to change, retrive qps */ mutex_lock(&dev->dev_lock); @@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp, u32 qp_state; struct ocrdma_qp_params params; struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device); memset(&params, 0, sizeof(params)); mutex_lock(&dev->dev_lock); @@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp, goto mbx_err; if (qp->qp_type == IB_QPT_UD) qp_attr->qkey = params.qkey; - qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT); - qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT); qp_attr->path_mtu = ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> @@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp, memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> OCRDMA_QP_PARAMS_STATE_SHIFT; + qp_attr->qp_state = get_ibqp_state(qp_state); + qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; qp_attr->max_dest_rd_atomic = params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; @@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp, params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; + /* Sync driver QP state with FW */ + ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL); mbx_err: return status; } -static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) +static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx) { - int i = idx / 32; - unsigned int mask = (1 << (idx % 32)); + unsigned int i = idx / 32; + u32 mask = (1U << (idx % 32)); - if (srq->idx_bit_fields[i] & mask) - srq->idx_bit_fields[i] &= ~mask; - else - srq->idx_bit_fields[i] |= mask; + srq->idx_bit_fields[i] ^= mask; } static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) @@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp) { int found = false; unsigned long flags; - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); /* sync with any active CQ poll */ spin_lock_irqsave(&dev->flush_q_lock, flags); @@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp) int ocrdma_destroy_qp(struct ib_qp *ibqp) { - int status; struct ocrdma_pd *pd; struct ocrdma_qp *qp; struct ocrdma_dev *dev; @@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) unsigned long flags; qp = get_ocrdma_qp(ibqp); - dev = qp->dev; + dev = get_ocrdma_dev(ibqp->device); attrs.qp_state = IB_QPS_ERR; pd = qp->pd; @@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) * discarded until the old CQEs are discarded. 
*/ mutex_lock(&dev->dev_lock); - status = ocrdma_mbx_destroy_qp(dev, qp); + (void) ocrdma_mbx_destroy_qp(dev, qp); /* * acquire CQ lock while destroy is in progress, in order to @@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) kfree(qp->wqe_wr_id_tbl); kfree(qp->rqe_wr_id_tbl); kfree(qp); - return status; + return 0; } static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, @@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, else ud_hdr->qkey = wr->wr.ud.remote_qkey; ud_hdr->rsvd_ahid = ah->id; + if (ah->av->valid & OCRDMA_AV_VLAN_VALID) + hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); } static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, @@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, u64 fbo; struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); struct ocrdma_mr *mr; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); - if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) + if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr) return -EINVAL; hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); @@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, fast_reg->size_sge = get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); mr = (struct ocrdma_mr *) (unsigned long) - qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; + dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); return 0; } @@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); status = ocrdma_build_write(qp, hdr, wr); break; - case IB_WR_RDMA_READ_WITH_INV: - hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); case IB_WR_RDMA_READ: ocrdma_build_read(qp, hdr, wr); break; @@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, bool *polled, bool *stop) { bool expand; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); int status = (le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; + if (status < OCRDMA_MAX_CQE_ERR) + atomic_inc(&dev->cqe_err_stats[status]); /* when hw sq is empty, but rq is not empty, so we continue * to keep the cqe in order to get the cq event again. @@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, int status) { bool expand; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + + if (status < OCRDMA_MAX_CQE_ERR) + atomic_inc(&dev->cqe_err_stats[status]); /* when hw_rq is empty, but wq is not empty, so continue * to keep the cqe to get the cq event again. 
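For reference, the ocrdma changes above preallocate a range of protection domains from firmware at init time (ocrdma_mbx_alloc_pd_range()) and then hand them out from a bitmap under dev->dev_lock (_ocrdma_pd_mgr_get_bitmap()/_ocrdma_pd_mgr_put_bitmap()), instead of issuing a mailbox command per PD. The standalone sketch below only illustrates that allocate-lowest-free-slot / release-by-index pattern in plain C; the names (pd_pool, pd_alloc, pd_free) and the 64-entry pool size are invented for the example and are not part of the driver, which uses the kernel's find_first_zero_bit()/__set_bit()/__clear_bit() helpers.

/*
 * Illustrative sketch only: a bitmap-backed ID allocator in the spirit of
 * the ocrdma PD range manager. Hypothetical names; not driver code.
 */
#include <stdio.h>
#include <stdint.h>

#define POOL_SIZE 64			/* assumed pool size for the demo */

struct pd_pool {
	uint32_t start_id;		/* first hardware PD id in the range */
	uint64_t bitmap;		/* one bit per preallocated PD */
};

/* Return the lowest free id in the range, or -1 when the pool is exhausted. */
static int pd_alloc(struct pd_pool *p)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (!(p->bitmap & (1ULL << i))) {
			p->bitmap |= 1ULL << i;		/* mark slot busy */
			return (int)(p->start_id + i);
		}
	}
	return -1;
}

/* Release a previously allocated id back to the pool. */
static int pd_free(struct pd_pool *p, uint32_t id)
{
	uint32_t idx = id - p->start_id;

	if (idx >= POOL_SIZE || !(p->bitmap & (1ULL << idx)))
		return -1;			/* bad id or double free */
	p->bitmap &= ~(1ULL << idx);
	return 0;
}

int main(void)
{
	struct pd_pool pool = { .start_id = 0x100, .bitmap = 0 };
	int a = pd_alloc(&pool);
	int b = pd_alloc(&pool);

	printf("allocated 0x%x and 0x%x\n", a, b);
	pd_free(&pool, (uint32_t)a);
	printf("next allocation reuses 0x%x\n", pd_alloc(&pool));
	return 0;
}

In the driver, the same idea is what lets ocrdma_get_pd_num() try the DPP range first and fall back to the normal PD range when the DPP pool is exhausted, while ocrdma_mbx_dealloc_pd_range() returns both ranges to firmware at teardown.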
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index c00ae093b6f8..ffd48bfc4923 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1082,12 +1082,6 @@ struct qib_devdata { /* control high-level access to EEPROM */ struct mutex eep_lock; uint64_t traffic_wds; - /* active time is kept in seconds, but logged in hours */ - atomic_t active_time; - /* Below are nominal shadow of EEPROM, new since last EEPROM update */ - uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; - uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; - uint16_t eep_hrs; /* * masks for which bits of errs, hwerrs that cause * each of the counters to increment. @@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer, int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, const void *buffer, int len); void qib_get_eeprom_info(struct qib_devdata *); -int qib_update_eeprom_log(struct qib_devdata *dd); -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); +#define qib_inc_eeprom_err(dd, eidx, incr) void qib_dump_lookup_output_queue(struct qib_devdata *); void qib_force_pio_avail_update(struct qib_devdata *); void qib_clear_symerror_on_linkup(unsigned long opaque); @@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit); * Flush write combining store buffers (if present) and perform a write * barrier. */ +static inline void qib_flush_wc(void) +{ #if defined(CONFIG_X86_64) -#define qib_flush_wc() asm volatile("sfence" : : : "memory") + asm volatile("sfence" : : : "memory"); #else -#define qib_flush_wc() wmb() /* no reorder around wc flush */ + wmb(); /* no reorder around wc flush */ #endif +} /* global module parameter variables */ extern unsigned qib_ibmtu; diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h index 5670ace27c63..4fb78abd8ba1 100644 --- a/drivers/infiniband/hw/qib/qib_common.h +++ b/drivers/infiniband/hw/qib/qib_common.h @@ -257,7 +257,7 @@ struct qib_base_info { /* shared memory page for send buffer disarm status */ __u64 spi_sendbuf_status; -} __attribute__ ((aligned(8))); +} __aligned(8); /* * This version number is given to the driver by the user code during @@ -361,7 +361,7 @@ struct qib_user_info { */ __u64 spu_base_info; -} __attribute__ ((aligned(8))); +} __aligned(8); /* User commands. */ diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c index 6abd3ed3cd51..5e75b43c596b 100644 --- a/drivers/infiniband/hw/qib/qib_debugfs.c +++ b/drivers/infiniband/hw/qib/qib_debugfs.c @@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd) DEBUGFS_FILE_CREATE(opcode_stats); DEBUGFS_FILE_CREATE(ctx_stats); DEBUGFS_FILE_CREATE(qp_stats); - return; } void qib_dbg_ibdev_exit(struct qib_ibdev *ibd) diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c index 5dfda4c5cc9c..8c34b23e5bf6 100644 --- a/drivers/infiniband/hw/qib/qib_diag.c +++ b/drivers/infiniband/hw/qib/qib_diag.c @@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd) client_pool = dc->next; else /* None in pool, alloc and init */ - dc = kmalloc(sizeof *dc, GFP_KERNEL); + dc = kmalloc(sizeof(*dc), GFP_KERNEL); if (dc) { dc->next = NULL; @@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset, if (dd->userbase) { /* If user regs mapped, they are after send, so set limit. 
*/ u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; + if (!dd->piovl15base) snd_lim = dd->uregbase; krb32 = (u32 __iomem *)dd->userbase; @@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset, snd_bottom = dd->pio2k_bufbase; if (snd_lim == 0) { u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign); + snd_lim = snd_bottom + tot2k; } /* If 4k buffers exist, account for them by bumping @@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs, /* not very efficient, but it works for now */ while (reg_addr < reg_end) { u64 data; + if (copy_from_user(&data, uaddr, sizeof(data))) { ret = -EFAULT; goto bail; @@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd, if (!dd || !op) return -EINVAL; - olp = vmalloc(sizeof *olp); + olp = vmalloc(sizeof(*olp)); if (!olp) { pr_err("vmalloc for observer failed\n"); return -ENOMEM; @@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data, op = diag_get_observer(dd, *off); if (op) { u32 offset = *off; + ret = op->hook(dd, op, offset, &data64, 0, use_32); } /* @@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data, if (count == 4 || count == 8) { u64 data64; u32 offset = *off; + ret = copy_from_user(&data64, data, count); if (ret) { ret = -EFAULT; diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 5bee08f16d74..f58fdc3d25a2 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit) { static char iname[16]; - snprintf(iname, sizeof iname, "infinipath%u", unit); + snprintf(iname, sizeof(iname), "infinipath%u", unit); return iname; } @@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; if (qp_num != QIB_MULTICAST_QPN) { int ruc_res; + qp = qib_lookup_qpn(ibp, qp_num); if (!qp) goto drop; @@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; if (dd->flags & QIB_NODMA_RTAIL) { u32 seq = qib_hdrget_seq(rhf_addr); + if (seq != rcd->seq_cnt) goto bail; hdrqtail = 0; @@ -651,6 +653,7 @@ bail: int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) { struct qib_devdata *dd = ppd->dd; + ppd->lid = lid; ppd->lmc = lmc; diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c index 4d5d71aaa2b4..311ee6c3dd5e 100644 --- a/drivers/infiniband/hw/qib/qib_eeprom.c +++ b/drivers/infiniband/hw/qib/qib_eeprom.c @@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd) if (t && dd0->nguid > 1 && t <= dd0->nguid) { u8 oguid; + dd->base_guid = dd0->base_guid; bguid = (u8 *) &dd->base_guid; @@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd) * This board has a Serial-prefix, which is stored * elsewhere for backward-compatibility. 
*/ - memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); - snp[sizeof ifp->if_sprefix] = '\0'; + memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix)); + snp[sizeof(ifp->if_sprefix)] = '\0'; len = strlen(snp); snp += len; - len = (sizeof dd->serial) - len; - if (len > sizeof ifp->if_serial) - len = sizeof ifp->if_serial; + len = sizeof(dd->serial) - len; + if (len > sizeof(ifp->if_serial)) + len = sizeof(ifp->if_serial); memcpy(snp, ifp->if_serial, len); - } else - memcpy(dd->serial, ifp->if_serial, - sizeof ifp->if_serial); + } else { + memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial)); + } if (!strstr(ifp->if_comment, "Tested successfully")) qib_dev_err(dd, "Board SN %s did not pass functional test: %s\n", dd->serial, ifp->if_comment); - memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); - /* - * Power-on (actually "active") hours are kept as little-endian value - * in EEPROM, but as seconds in a (possibly as small as 24-bit) - * atomic_t while running. - */ - atomic_set(&dd->active_time, 0); - dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); - done: vfree(buf); bail:; } -/** - * qib_update_eeprom_log - copy active-time and error counters to eeprom - * @dd: the qlogic_ib device - * - * Although the time is kept as seconds in the qib_devdata struct, it is - * rounded to hours for re-write, as we have only 16 bits in EEPROM. - * First-cut code reads whole (expected) struct qib_flash, modifies, - * re-writes. Future direction: read/write only what we need, assuming - * that the EEPROM had to have been "good enough" for driver init, and - * if not, we aren't making it worse. - * - */ -int qib_update_eeprom_log(struct qib_devdata *dd) -{ - void *buf; - struct qib_flash *ifp; - int len, hi_water; - uint32_t new_time, new_hrs; - u8 csum; - int ret, idx; - unsigned long flags; - - /* first, check if we actually need to do anything. */ - ret = 0; - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - if (dd->eep_st_new_errs[idx]) { - ret = 1; - break; - } - } - new_time = atomic_read(&dd->active_time); - - if (ret == 0 && new_time < 3600) - goto bail; - - /* - * The quick-check above determined that there is something worthy - * of logging, so get current contents and do a more detailed idea. - * read full flash, not just currently used part, since it may have - * been written with a newer definition - */ - len = sizeof(struct qib_flash); - buf = vmalloc(len); - ret = 1; - if (!buf) { - qib_dev_err(dd, - "Couldn't allocate memory to read %u bytes from eeprom for logging\n", - len); - goto bail; - } - - /* Grab semaphore and read current EEPROM. If we get an - * error, let go, but if not, keep it until we finish write. 
- */ - ret = mutex_lock_interruptible(&dd->eep_lock); - if (ret) { - qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); - goto free_bail; - } - ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); - if (ret) { - mutex_unlock(&dd->eep_lock); - qib_dev_err(dd, "Unable read EEPROM for logging\n"); - goto free_bail; - } - ifp = (struct qib_flash *)buf; - - csum = flash_csum(ifp, 0); - if (csum != ifp->if_csum) { - mutex_unlock(&dd->eep_lock); - qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", - csum, ifp->if_csum); - ret = 1; - goto free_bail; - } - hi_water = 0; - spin_lock_irqsave(&dd->eep_st_lock, flags); - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - int new_val = dd->eep_st_new_errs[idx]; - if (new_val) { - /* - * If we have seen any errors, add to EEPROM values - * We need to saturate at 0xFF (255) and we also - * would need to adjust the checksum if we were - * trying to minimize EEPROM traffic - * Note that we add to actual current count in EEPROM, - * in case it was altered while we were running. - */ - new_val += ifp->if_errcntp[idx]; - if (new_val > 0xFF) - new_val = 0xFF; - if (ifp->if_errcntp[idx] != new_val) { - ifp->if_errcntp[idx] = new_val; - hi_water = offsetof(struct qib_flash, - if_errcntp) + idx; - } - /* - * update our shadow (used to minimize EEPROM - * traffic), to match what we are about to write. - */ - dd->eep_st_errs[idx] = new_val; - dd->eep_st_new_errs[idx] = 0; - } - } - /* - * Now update active-time. We would like to round to the nearest hour - * but unless atomic_t are sure to be proper signed ints we cannot, - * because we need to account for what we "transfer" to EEPROM and - * if we log an hour at 31 minutes, then we would need to set - * active_time to -29 to accurately count the _next_ hour. - */ - if (new_time >= 3600) { - new_hrs = new_time / 3600; - atomic_sub((new_hrs * 3600), &dd->active_time); - new_hrs += dd->eep_hrs; - if (new_hrs > 0xFFFF) - new_hrs = 0xFFFF; - dd->eep_hrs = new_hrs; - if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { - ifp->if_powerhour[0] = new_hrs & 0xFF; - hi_water = offsetof(struct qib_flash, if_powerhour); - } - if ((new_hrs >> 8) != ifp->if_powerhour[1]) { - ifp->if_powerhour[1] = new_hrs >> 8; - hi_water = offsetof(struct qib_flash, if_powerhour) + 1; - } - } - /* - * There is a tiny possibility that we could somehow fail to write - * the EEPROM after updating our shadows, but problems from holding - * the spinlock too long are a much bigger issue. - */ - spin_unlock_irqrestore(&dd->eep_st_lock, flags); - if (hi_water) { - /* we made some change to the data, uopdate cksum and write */ - csum = flash_csum(ifp, 1); - ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1); - } - mutex_unlock(&dd->eep_lock); - if (ret) - qib_dev_err(dd, "Failed updating EEPROM\n"); - -free_bail: - vfree(buf); -bail: - return ret; -} - -/** - * qib_inc_eeprom_err - increment one of the four error counters - * that are logged to EEPROM. - * @dd: the qlogic_ib device - * @eidx: 0..3, the counter to increment - * @incr: how much to add - * - * Each counter is 8-bits, and saturates at 255 (0xFF). They - * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log() - * is called, but it can only be called in a context that allows sleep. - * This function can be called even at interrupt level. 
- */ -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr) -{ - uint new_val; - unsigned long flags; - - spin_lock_irqsave(&dd->eep_st_lock, flags); - new_val = dd->eep_st_new_errs[eidx] + incr; - if (new_val > 255) - new_val = 255; - dd->eep_st_new_errs[eidx] = new_val; - spin_unlock_irqrestore(&dd->eep_st_lock, flags); -} diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index b15e34eeef68..41937c6f888a 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, * unless perhaps the user has mpin'ed the pages * themselves. */ - qib_devinfo(dd->pcidev, - "Failed to lock addr %p, %u pages: " - "errno %d\n", (void *) vaddr, cnt, -ret); + qib_devinfo( + dd->pcidev, + "Failed to lock addr %p, %u pages: errno %d\n", + (void *) vaddr, cnt, -ret); goto done; } for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { @@ -437,7 +438,7 @@ cleanup: goto cleanup; } if (copy_to_user((void __user *) (unsigned long) ti->tidmap, - tidmap, sizeof tidmap)) { + tidmap, sizeof(tidmap))) { ret = -EFAULT; goto cleanup; } @@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, } if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, - sizeof tidmap)) { + sizeof(tidmap))) { ret = -EFAULT; goto done; } @@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, /* rcvegrbufs are read-only on the slave */ if (vma->vm_flags & VM_WRITE) { qib_devinfo(dd->pcidev, - "Can't map eager buffers as " - "writable (flags=%lx)\n", vma->vm_flags); + "Can't map eager buffers as writable (flags=%lx)\n", + vma->vm_flags); ret = -EPERM; goto bail; } @@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) */ if (weight >= qib_cpulist_count) { int cpu; + cpu = find_first_zero_bit(qib_cpulist, qib_cpulist_count); if (cpu == qib_cpulist_count) @@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd, if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, uinfo->spu_userversion & 0xffff)) { qib_devinfo(dd->pcidev, - "Mismatched user version (%d.%d) and driver " - "version (%d.%d) while context sharing. Ensure " - "that driver and library are from the same " - "release.\n", + "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. 
Ensure that driver and library are from the same release.\n", (int) (uinfo->spu_userversion >> 16), (int) (uinfo->spu_userversion & 0xffff), QIB_USER_SWMAJOR, QIB_USER_SWMINOR); @@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port, } if (!ppd) { u32 pidx = ctxt % dd->num_pports; + if (usable(dd->pport + pidx)) ppd = dd->pport + pidx; else { @@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, if (alg == QIB_PORT_ALG_ACROSS) { unsigned inuse = ~0U; + /* find device (with ACTIVE ports) with fewest ctxts in use */ for (ndev = 0; ndev < devmax; ndev++) { struct qib_devdata *dd = qib_lookup(ndev); unsigned cused = 0, cfree = 0, pusable = 0; + if (!dd) continue; if (port && port <= dd->num_pports && @@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, } else { for (ndev = 0; ndev < devmax; ndev++) { struct qib_devdata *dd = qib_lookup(ndev); + if (dd) { ret = choose_port_ctxt(fp, dd, port, uinfo); if (!ret) @@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit) } for (ndev = 0; ndev < devmax; ndev++) { struct qib_devdata *dd = qib_lookup(ndev); + if (dd) { if (pcibus_to_node(dd->pcidev->bus) < 0) { ret = -EINVAL; diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 81854586c081..650897a8591e 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf, { qib_stats.sps_ints = qib_sps_ints(); return simple_read_from_buffer(buf, count, ppos, &qib_stats, - sizeof qib_stats); + sizeof(qib_stats)); } /* @@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, qib_statnames, - sizeof qib_statnames - 1); /* no null */ + sizeof(qib_statnames) - 1); /* no null */ } static const struct file_operations driver_ops[] = { @@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) int ret, i; /* create the per-unit directory */ - snprintf(unit, sizeof unit, "%u", dd->unit); + snprintf(unit, sizeof(unit), "%u", dd->unit); ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, &simple_dir_operations, dd); if (ret) { @@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name) } spin_lock(&tmp->d_lock); - if (!(d_unhashed(tmp) && tmp->d_inode)) { + if (!d_unhashed(tmp) && tmp->d_inode) { __d_drop(tmp); spin_unlock(&tmp->d_lock); simple_unlink(parent->d_inode, tmp); @@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb, root = dget(sb->s_root); mutex_lock(&root->d_inode->i_mutex); - snprintf(unit, sizeof unit, "%u", dd->unit); + snprintf(unit, sizeof(unit), "%u", dd->unit); dir = lookup_one_len(unit, root, strlen(unit)); if (IS_ERR(dir)) { @@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct dentry *ret; + ret = mount_single(fs_type, flags, data, qibfs_fill_super); if (!IS_ERR(ret)) qib_super = ret->d_sb; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index d68266ac7619..0d2ba59af30a 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 
value, int ctxt) { u64 __iomem *ubase; + if (dd->userbase) ubase = (u64 __iomem *) ((char __iomem *) dd->userbase + @@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, bits = (u32) ((hwerrs >> QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PCIe Mem Parity Errs %x] ", bits); strlcat(msg, bitsmsg, msgl); } if (hwerrs & _QIB_PLL_FAIL) { isfatal = 1; - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) hwerrs & _QIB_PLL_FAIL); strlcat(msg, bitsmsg, msgl); @@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) /* do these first, they are most important */ if (errs & ERR_MASK(HardwareErr)) - qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); else for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) if (errs & dd->eep_st_masks[log_idx].errs_to_log) @@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) */ mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); - qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); + qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask); if (errs & E_SUM_PKTERRS) qib_stats.sps_rcverrs++; @@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data) } if (crcs) { u32 cntr = dd->cspec->lli_counter; + cntr += crcs; if (cntr) { if (cntr > dd->cspec->lli_thresh) { @@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd) "irq is 0, BIOS error? 
Interrupts won't work\n"); else { int ret; + ret = request_irq(dd->cspec->irq, qib_6120intr, 0, QIB_DRV_NAME, dd); if (ret) @@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque) spin_lock_irqsave(&dd->eep_st_lock, flags); traffic_wds -= dd->traffic_wds; dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(5, &dd->active_time); /* S/B #define */ spin_unlock_irqrestore(&dd->eep_st_lock, flags); qib_chk_6120_errormask(dd); @@ -2929,6 +2930,7 @@ bail: static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) { int ret = 0; + if (!strncmp(what, "ibc", 3)) { ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", @@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd) static void set_6120_baseaddrs(struct qib_devdata *dd) { u32 cregbase; + cregbase = qib_read_kreg32(dd, kr_counterregbase); dd->cspec->cregbase = (u64 __iomem *) ((char __iomem *) dd->kregbase + cregbase); diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 7dec89fdc124..22affda8af88 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs) errs &= QLOGIC_IB_E_SDMAERRS; msg = dd->cspec->sdmamsgbuf; - qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); + qib_decode_7220_sdma_errs(ppd, errs, msg, + sizeof(dd->cspec->sdmamsgbuf)); spin_lock_irqsave(&ppd->sdma_lock, flags); if (errs & ERR_MASK(SendBufMisuseErr)) { @@ -1043,6 +1044,7 @@ done: static void reenable_7220_chase(unsigned long opaque) { struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; + ppd->cpspec->chase_timer.expires = 0; qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, QLOGIC_IB_IBCC_LINKINITCMD_POLL); @@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) /* do these first, they are most important */ if (errs & ERR_MASK(HardwareErr)) - qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); else for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) if (errs & dd->eep_st_masks[log_idx].errs_to_log) @@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); - qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); + qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask); if (errs & E_SUM_PKTERRS) qib_stats.sps_rcverrs++; @@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, bits = (u32) ((hwerrs >> QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PCIe Mem Parity Errs %x] ", bits); strlcat(msg, bitsmsg, msgl); } @@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, if (hwerrs & _QIB_PLL_FAIL) { isfatal = 1; - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) hwerrs & _QIB_PLL_FAIL); strlcat(msg, bitsmsg, msgl); @@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque) 
spin_lock_irqsave(&dd->eep_st_lock, flags); traffic_wds -= dd->traffic_wds; dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(5, &dd->active_time); /* S/B #define */ spin_unlock_irqrestore(&dd->eep_st_lock, flags); done: mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index a7eb32517a04..ef97b71c8f7d 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling"); static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); -MODULE_PARM_DESC(long_attenuation, \ +MODULE_PARM_DESC(long_attenuation, "attenuation cutoff (dB) for long copper cable setup"); static ushort qib_singleport; @@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = { static int setup_txselect(const char *, struct kernel_param *); module_param_call(txselect, setup_txselect, param_get_string, &kp_txselect, S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(txselect, \ +MODULE_PARM_DESC(txselect, "Tx serdes indices (for no QSFP or invalid QSFP data)"); #define BOARD_QME7342 5 #define BOARD_QMH7342 6 +#define BOARD_QMH7360 9 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ BOARD_QMH7342) #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ @@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 value, int ctxt) { u64 __iomem *ubase; + if (dd->userbase) ubase = (u64 __iomem *) ((char __iomem *) dd->userbase + @@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) /* do these first, they are most important */ if (errs & QIB_E_HARDWARE) { *msg = '\0'; - qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); } else for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) if (errs & dd->eep_st_masks[log_idx].errs_to_log) @@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) mask = QIB_E_HARDWARE; *msg = '\0'; - err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, + err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask, qib_7322error_msgs); /* @@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) *msg = '\0'; if (errs & ~QIB_E_P_BITSEXTANT) { - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); if (!*msg) - snprintf(msg, sizeof ppd->cpspec->epmsgbuf, + snprintf(msg, sizeof(ppd->cpspec->epmsgbuf), "no others"); qib_dev_porterr(dd, ppd->port, "error interrupt with unknown errors 0x%016Lx set (and %s)\n", @@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) /* determine cause, then write to clear */ symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom, hdrchk_msgs); *msg = '\0'; /* senderrbuf cleared in SPKTERRS below */ @@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) * isn't valid. 
We don't want to confuse people, so * we just don't print them, except at debug */ - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), (errs & QIB_E_P_LINK_PKTERRS), qib_7322p_error_msgs); *msg = '\0'; @@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) * valid. We don't want to confuse people, so we just * don't print them, except at debug */ - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs, qib_7322p_error_msgs); ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; *msg = '\0'; @@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) if (dd->cspec->num_msix_entries) { /* and same for MSIx */ u64 val = qib_read_kreg64(dd, kr_intgranted); + if (val) qib_write_kreg(dd, kr_intgranted, val); } @@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, int err; unsigned long flags; struct qib_pportdata *ppd = dd->pport; + for (; pidx < dd->num_pports; ++pidx, ppd++) { err = 0; if (pidx == 0 && (hwerrs & @@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify, if (n->rcv) { struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; + qib_update_rhdrq_dca(rcd, cpu); } else { struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; + qib_update_sdma_dca(ppd, cpu); } } @@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref) if (n->rcv) { struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; + dd = rcd->dd; } else { struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; + dd = ppd->dd; } qib_devinfo(dd->pcidev, @@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) struct qib_pportdata *ppd; struct qib_qsfp_data *qd; u32 mask; + if (!dd->pport[pidx].link_speed_supported) continue; mask = QSFP_GPIO_MOD_PRS_N; @@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); if (gpiostatus & dd->cspec->gpio_mask & mask) { u64 pins; + qd = &ppd->cpspec->qsfp_data; gpiostatus &= ~mask; pins = qib_read_kreg64(dd, kr_extstatus); @@ -3442,7 +3452,7 @@ try_intx: } /* Try to get MSIx interrupts */ - memset(redirect, 0, sizeof redirect); + memset(redirect, 0, sizeof(redirect)); mask = ~0ULL; msixnum = 0; local_mask = cpumask_of_pcibus(dd->pcidev->bus); @@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd) n = "InfiniPath_QME7362"; dd->flags |= QIB_HAS_QSFP; break; + case BOARD_QMH7360: + n = "Intel IB QDR 1P FLR-QSFP Adptr"; + dd->flags |= QIB_HAS_QSFP; + break; case 15: n = "InfiniPath_QLE7342_TEST"; dd->flags |= QIB_HAS_QSFP; @@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd) */ for (i = 0; i < msix_entries; i++) { u64 vecaddr, vecdata; + vecaddr = qib_read_kreg64(dd, 2 * i + (QIB_7322_MsixTable_OFFS / sizeof(u64))); vecdata = qib_read_kreg64(dd, 1 + 2 * i + @@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque) spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); traffic_wds -= ppd->dd->traffic_wds; ppd->dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & QIB_IB_QDR) && @@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which) 
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) { u64 newctrlb; + newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK | IBA7322_IBC_MAX_SPEED_MASK); @@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd) static void qib_7322_set_baseaddrs(struct qib_devdata *dd) { u32 cregbase; + cregbase = qib_read_kreg32(dd, kr_counterregbase); dd->cspec->cregbase = (u64 __iomem *)(cregbase + @@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp) struct qib_devdata *dd; unsigned long val; char *n; + if (strlen(str) >= MAX_ATTEN_LEN) { pr_info("txselect_values string too long\n"); return -ENOSPC; @@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd) val = TIDFLOW_ERRBITS; /* these are W1C */ for (i = 0; i < dd->cfgctxts; i++) { int flow; + for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); } @@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { struct qib_chippport_specific *cp = ppd->cpspec; + ppd->link_speed_supported = features & PORT_SPD_CAP; features >>= PORT_SPD_CAP_SHIFT; if (!ppd->link_speed_supported) { @@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) ppd->vls_supported = IB_VL_VL0_7; else { qib_devinfo(dd->pcidev, - "Invalid num_vls %u for MTU %d " - ", using 4 VLs\n", + "Invalid num_vls %u for MTU %d , using 4 VLs\n", qib_num_cfg_vls, mtu); ppd->vls_supported = IB_VL_VL0_3; qib_num_cfg_vls = 4; @@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) static int serdes_7322_init(struct qib_pportdata *ppd) { int ret = 0; + if (ppd->dd->cspec->r1) ret = serdes_7322_init_old(ppd); else @@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd) static int qib_r_grab(struct qib_devdata *dd) { - u64 val; - val = SJA_EN; + u64 val = SJA_EN; + qib_write_kreg(dd, kr_r_access, val); qib_read_kreg32(dd, kr_scratch); return 0; @@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd) { u64 val; int timeout; + for (timeout = 0; timeout < 100 ; ++timeout) { val = qib_read_kreg32(dd, kr_r_access); if (val & R_RDY) @@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten, } if (inp) { int tdi = inp[pos >> 3] >> (pos & 7); + val |= ((tdi & 1) << R_TDI_LSB); } qib_write_kreg(dd, kr_r_access, val); diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 729da39c49ed..2ee36953e234 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd) * Allocate full ctxtcnt array, rather than just cfgctxts, because * cleanup iterates across all possible ctxts. 
*/ - dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); + dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL); if (!dd->rcd) { qib_dev_err(dd, "Unable to allocate ctxtdata array, failing\n"); @@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, u8 hw_pidx, u8 port) { int size; + ppd->dd = dd; ppd->hw_pidx = hw_pidx; ppd->port = port; /* IB port number, not index */ @@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd) ppd = dd->pport + pidx; if (!ppd->qib_wq) { char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ + snprintf(wq_name, sizeof(wq_name), "qib%d_%d", dd->unit, pidx); ppd->qib_wq = @@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit) for (pidx = 0; pidx < dd->num_pports; ++pidx) { int mtu; + if (lastfail) ret = lastfail; ppd = dd->pport + pidx; @@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd) qib_free_pportdata(ppd); } - qib_update_eeprom_log(dd); } /** @@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd) addr = vmalloc(cnt); if (!addr) { qib_devinfo(dd->pcidev, - "Couldn't get memory for checking PIO perf," - " skipping\n"); + "Couldn't get memory for checking PIO perf, skipping\n"); goto done; } @@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) if (!qib_cpulist_count) { u32 count = num_online_cpus(); + qib_cpulist = kzalloc(BITS_TO_LONGS(count) * sizeof(long), GFP_KERNEL); if (qib_cpulist) @@ -1179,7 +1181,7 @@ bail: if (!list_empty(&dd->list)) list_del_init(&dd->list); ib_dealloc_device(&dd->verbs_dev.ibdev); - return ERR_PTR(ret);; + return ERR_PTR(ret); } /* diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index f4918f2165ec..086616d071b9 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c @@ -168,7 +168,6 @@ skip_ibchange: ppd->lastibcstat = ibcs; if (ev) signal_ib_event(ppd, ev); - return; } void qib_clear_symerror_on_linkup(unsigned long opaque) diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 3b9afccaaade..ad843c786e72 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr) if (!mr->lkey_published) goto out; if (lkey == 0) - rcu_assign_pointer(dev->dma_mr, NULL); + RCU_INIT_POINTER(dev->dma_mr, NULL); else { r = lkey >> (32 - ib_qib_lkey_table_size); - rcu_assign_pointer(rkt->table[r], NULL); + RCU_INIT_POINTER(rkt->table[r], NULL); } qib_put_mr(mr); mr->lkey_published = 0; diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 636be117b578..395f4046dba2 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, data.trap_num = trap_num; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_257_258.lid1 = lid1; data.details.ntc_257_258.lid2 = lid2; data.details.ntc_257_258.key = cpu_to_be32(key); data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport 
*ibp, struct ib_smp *smp) data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_256.lid = data.issuer_lid; data.details.ntc_256.method = smp->method; data.details.ntc_256.attr_id = smp->attr_id; @@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) hop_cnt); } - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp) data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp) data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_145.lid = data.issuer_lid; data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp) data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.local_changes = 1; data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } static int subn_get_nodedescription(struct ib_smp *smp, diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c index 8b73a11d571c..146cf29a2e1d 100644 --- a/drivers/infiniband/hw/qib/qib_mmap.c +++ b/drivers/infiniband/hw/qib/qib_mmap.c @@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, void *obj) { struct qib_mmap_info *ip; - ip = kmalloc(sizeof *ip, GFP_KERNEL); + ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) goto bail; diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index a77fb4fb14e4..c4473db46699 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c @@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; for (; i < m; i++) { - mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); + mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); if (!mr->map[i]) goto bail; } @@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) goto bail; } - mr = kzalloc(sizeof *mr, GFP_KERNEL); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd) /* Allocate struct plus pointers to first level page tables. 
*/ m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; - mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); + mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); if (!mr) goto bail; @@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) if (size > PAGE_SIZE) return ERR_PTR(-EINVAL); - pl = kzalloc(sizeof *pl, GFP_KERNEL); + pl = kzalloc(sizeof(*pl), GFP_KERNEL); if (!pl) return ERR_PTR(-ENOMEM); @@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, /* Allocate struct plus pointers to first level page tables. */ m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; - fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); + fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); if (!fmr) goto bail; diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 61a0046efb76..4758a3801ae8 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, /* We can't pass qib_msix_entry array to qib_msix_setup * so use a dummy msix_entry array and copy the allocated * irq back to the qib_msix_entry array. */ - msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL); + msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL); if (!msix_entry) goto do_intx; @@ -234,8 +234,10 @@ free_msix_entry: kfree(msix_entry); do_intx: - qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, " - "falling back to INTx\n", nvec, ret); + qib_dev_err( + dd, + "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n", + nvec, ret); *msixcnt = 0; qib_enable_intx(dd->pcidev); } @@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline) void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) { int r; + r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0); if (r) @@ -696,6 +699,7 @@ static void qib_pci_resume(struct pci_dev *pdev) { struct qib_devdata *dd = pci_get_drvdata(pdev); + qib_devinfo(pdev, "QIB resume function called\n"); pci_cleanup_aer_uncorrect_error_status(pdev); /* diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 6ddc0264aad2..4fa88ba2963e 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) if (rcu_dereference_protected(ibp->qp0, lockdep_is_held(&dev->qpt_lock)) == qp) { - rcu_assign_pointer(ibp->qp0, NULL); + RCU_INIT_POINTER(ibp->qp0, NULL); } else if (rcu_dereference_protected(ibp->qp1, lockdep_is_held(&dev->qpt_lock)) == qp) { - rcu_assign_pointer(ibp->qp1, NULL); + RCU_INIT_POINTER(ibp->qp1, NULL); } else { struct qib_qp *q; struct qib_qp __rcu **qpp; @@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) lockdep_is_held(&dev->qpt_lock))) != NULL; qpp = &q->next) if (q == qp) { - rcu_assign_pointer(*qpp, + RCU_INIT_POINTER(*qpp, rcu_dereference_protected(qp->next, lockdep_is_held(&dev->qpt_lock))); removed = 1; @@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) for (n = 0; n < dev->qp_table_size; n++) { qp = rcu_dereference_protected(dev->qp_table[n], lockdep_is_held(&dev->qpt_lock)); - rcu_assign_pointer(dev->qp_table[n], NULL); + RCU_INIT_POINTER(dev->qp_table[n], NULL); for (; qp; qp = 
rcu_dereference_protected(qp->next, lockdep_is_held(&dev->qpt_lock))) diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index fa71b1e666c5..5e27f76805e2 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c @@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len) * Module could take up to 2 Msec to respond to MOD_SEL, and there * is no way to tell if it is ready, so we must wait. */ - msleep(2); + msleep(20); /* Make sure TWSI bus is in sane state. */ ret = qib_twsi_reset(dd); @@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len) while (cnt < len) { unsigned in_page; int wlen = len - cnt; + in_page = addr % QSFP_PAGESIZE; if ((in_page + wlen) > QSFP_PAGESIZE) wlen = QSFP_PAGESIZE - in_page; @@ -139,7 +140,7 @@ deselect: else if (pass) qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); - msleep(2); + msleep(20); bail: mutex_unlock(&dd->eep_lock); @@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp, * Module could take up to 2 Msec to respond to MOD_SEL, * and there is no way to tell if it is ready, so we must wait. */ - msleep(2); + msleep(20); /* Make sure TWSI bus is in sane state. */ ret = qib_twsi_reset(dd); @@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp, while (cnt < len) { unsigned in_page; int wlen = len - cnt; + in_page = addr % QSFP_PAGESIZE; if ((in_page + wlen) > QSFP_PAGESIZE) wlen = QSFP_PAGESIZE - in_page; @@ -234,7 +236,7 @@ deselect: * going away, and there is no way to tell if it is ready. * so we must wait. */ - msleep(2); + msleep(20); bail: mutex_unlock(&dd->eep_lock); @@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp) * set the page to zero, Even if it already appears to be zero. */ u8 poke = 0; + ret = qib_qsfp_write(ppd, 127, &poke, 1); udelay(50); if (ret != 1) { @@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, udelay(20); /* Generous RST dwell */ dd->f_gpio_mod(dd, mask, mask, mask); - return; } void qib_qsfp_deinit(struct qib_qsfp_data *qd) @@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len) while (bidx < QSFP_DEFAULT_HDR_CNT) { int iidx; + ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK); if (ret < 0) goto bail; diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 2f2501890c4e..4544d6f88ad7 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; @@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, /* Post a send completion queue entry if requested. 
*/ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 4c07a8b34ffe..f42bd0f47577 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index) struct qib_pportdata *ppd = ppd_from_ibp(ibp); return ppd->guid; - } else - return ibp->guids[index - 1]; + } + return ibp->guids[index - 1]; } static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) @@ -420,7 +420,7 @@ again: goto serr; } - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); send_status = IB_WC_SUCCESS; release = 1; @@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, status != IB_WC_SUCCESS) { struct ib_wc wc; - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; wc.status = status; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 911205d3d5a0..c72775f27212 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst) * it again during startup. */ u64 val; + rst_val &= ~(1ULL); qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask & @@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim) * Both should be clear */ u64 newval = 0; + qib_write_kreg(dd, acc, newval); /* First read after write is not trustworthy */ pollval = qib_read_kreg32(dd, acc); @@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim) /* Need to claim */ u64 pollval; u64 newval = EPB_ACC_REQ | oct_sel; + qib_write_kreg(dd, acc, newval); /* First read after write is not trustworthy */ pollval = qib_read_kreg32(dd, acc); @@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc, if (!sofar) { /* Only set address at start of chunk */ int addrbyte = (addr + sofar) >> 8; + transval = csbit | EPB_MADDRH | addrbyte; tries = epb_trans(dd, trans, transval, &transval); @@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw) * IRQ not set up at this point in init, so we poll. 
*/ #define IB_SERDES_TRIM_DONE (1ULL << 11) -#define TRIM_TMO (30) +#define TRIM_TMO (15) static int qib_sd_trimdone_poll(struct qib_devdata *dd) { @@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd) ret = 1; break; } - msleep(10); + msleep(20); } if (trim_tmo >= TRIM_TMO) { qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); @@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd) dds_reg_map >>= 4; for (midx = 0; midx < DDS_ROWS; ++midx) { u64 __iomem *daddr = taddr + ((midx << 4) + idx); + data = dds_init_vals[midx].reg_vals[idx]; writeq(data, daddr); mmiowb(); diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 3c8e4e3caca6..81f56cdff2bc 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device, container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); - buf[sizeof dd->serial] = '\0'; - memcpy(buf, dd->serial, sizeof dd->serial); + buf[sizeof(dd->serial)] = '\0'; + memcpy(buf, dd->serial, sizeof(dd->serial)); strcat(buf, "\n"); return strlen(buf); } @@ -611,28 +611,6 @@ bail: return ret < 0 ? ret : count; } -static ssize_t show_logged_errs(struct device *device, - struct device_attribute *attr, char *buf) -{ - struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); - struct qib_devdata *dd = dd_from_dev(dev); - int idx, count; - - /* force consistency with actual EEPROM */ - if (qib_update_eeprom_log(dd) != 0) - return -ENXIO; - - count = 0; - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", - dd->eep_st_errs[idx], - idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); - } - - return count; -} - /* * Dump tempsense regs. in decimal, to ease shell-scripts. 
*/ @@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL); static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); -static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); @@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = { &dev_attr_nfreectxts, &dev_attr_serial, &dev_attr_boardversion, - &dev_attr_logged_errors, &dev_attr_tempsense, &dev_attr_localbus_info, &dev_attr_chip_reset, diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c index 647f7beb1b0a..f5698664419b 100644 --- a/drivers/infiniband/hw/qib/qib_twsi.c +++ b/drivers/infiniband/hw/qib/qib_twsi.c @@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit) udelay(2); else { int rise_usec; + for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) break; @@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd) static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) { int ret = 1; + if (flags & QIB_TWSI_START) start_seq(dd); @@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, int sub_len; const u8 *bp = buffer; int max_wait_time, i; - int ret; - ret = 1; + int ret = 1; while (len > 0) { if (dev == QIB_TWSI_NO_DEV) { diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c index 31d3561400a4..eface3b3dacf 100644 --- a/drivers/infiniband/hw/qib/qib_tx.c +++ b/drivers/infiniband/hw/qib/qib_tx.c @@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, for (i = 0; i < cnt; i++) { int which; + if (!test_bit(i, mask)) continue; /* diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index aaf7039f8ed2..26243b722b5e 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) * present on the wire. */ length = swqe->length; - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.byte_len = length + sizeof(struct ib_grh); if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index d2806cae234c..3e0677c51276 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -50,7 +50,7 @@ /* expected size of headers (for dma_pool) */ #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64 /* attempt to drain the queue for 5secs */ -#define QIB_USER_SDMA_DRAIN_TIMEOUT 500 +#define QIB_USER_SDMA_DRAIN_TIMEOUT 250 /* * track how many times a process open this driver. 
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) sdma_rb_node->refcount++; } else { int ret; + sdma_rb_node = kmalloc(sizeof( struct qib_user_sdma_rb_node), GFP_KERNEL); if (!sdma_rb_node) @@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, if (tiddma) { char *tidsm = (char *)pkt + pktsize; + cfur = copy_from_user(tidsm, iov[idx].iov_base, tidsmsize); if (cfur) { @@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, qib_user_sdma_hwqueue_clean(ppd); qib_user_sdma_queue_clean(ppd, pq); mutex_unlock(&pq->lock); - msleep(10); + msleep(20); } if (pq->num_pending || pq->num_sending) { @@ -1316,8 +1318,6 @@ retry: if (nfree && !list_empty(pktlist)) goto retry; - - return; } /* pq->lock must be held, get packets on the wire... */ diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 9bcfbd842980..4a3599890ea5 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, done: if (dd->flags & QIB_USE_SPCL_TRIG) { u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; + qib_flush_wc(); __raw_writel(0xaebecede, piobuf_orig + spcl_off); } @@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev, * we allow allocations of more than we report for this value. */ - pd = kmalloc(sizeof *pd, GFP_KERNEL); + pd = kmalloc(sizeof(*pd), GFP_KERNEL); if (!pd) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd, goto bail; } - ah = kmalloc(sizeof *ah, GFP_ATOMIC); + ah = kmalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) struct ib_ah *ah = ERR_PTR(-EINVAL); struct qib_qp *qp0; - memset(&attr, 0, sizeof attr); + memset(&attr, 0, sizeof(attr)); attr.dlid = dlid; attr.port_num = ppd_from_ibp(ibp)->port; rcu_read_lock(); @@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev, struct qib_ucontext *context; struct ib_ucontext *ret; - context = kmalloc(sizeof *context, GFP_KERNEL); + context = kmalloc(sizeof(*context), GFP_KERNEL); if (!context) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd) dev->qp_table_size = ib_qib_qp_table_size; get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); - dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, + dev->qp_table = kmalloc_array( + dev->qp_table_size, + sizeof(*dev->qp_table), GFP_KERNEL); if (!dev->qp_table) { ret = -ENOMEM; @@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd) for (i = 0; i < ppd->sdma_descq_cnt; i++) { struct qib_verbs_txreq *tx; - tx = kzalloc(sizeof *tx, GFP_KERNEL); + tx = kzalloc(sizeof(*tx), GFP_KERNEL); if (!tx) { ret = -ENOMEM; goto err_tx; diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c index dabb697b1c2a..f8ea069a3eaf 100644 --- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c +++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c @@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) { struct qib_mcast_qp *mqp; - mqp = kmalloc(sizeof *mqp, GFP_KERNEL); + mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); if (!mqp) goto bail; @@ -75,7 +75,7 @@ static struct qib_mcast 
*qib_mcast_alloc(union ib_gid *mgid)
 {
 	struct qib_mcast *mcast;
 
-	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+	mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
 	if (!mcast)
 		goto bail;
 
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 1d7281c5a02e..81b225f2300a 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
 	if (dd->piobcnt2k && dd->piobcnt4k) { /* 2 sizes for chip */
 		unsigned long pio2kbase, pio4kbase;
+
 		pio2kbase = dd->piobufbase & 0xffffffffUL;
 		pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
 		if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
 	}
 
 	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-		/* do nothing */ ;
+		; /* do nothing */
 
 	if (piolen != (1ULL << bits)) {
 		piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
 		piolen = 1ULL << (bits + 1);
 	}
 	if (pioaddr & (piolen - 1)) {
-		u64 atmp;
-		atmp = pioaddr & ~(piolen - 1);
+		u64 atmp = pioaddr & ~(piolen - 1);
+
 		if (atmp < addr || (atmp + piolen) > (addr + len)) {
 			qib_dev_err(dd,
 				"No way to align address/size (%llx/%llx), no WC mtrr\n",
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5ce26817e7e1..b47aea1094b2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 			   enum dma_data_direction dma_dir);
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-			      struct iser_data_buf *data);
+			      struct iser_data_buf *data,
+			      enum dma_data_direction dir);
+
 int iser_initialize_task_headers(struct iscsi_task *task,
 				 struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3821633f1065..20e859a6f1a6 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
 
-	if (!iser_conn->rx_descs)
-		goto free_login_buf;
-
 	if (device->iser_free_rdma_reg_res)
 		device->iser_free_rdma_reg_res(ib_conn);
 
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
 	/* make sure we never redo any unmapping */
 	iser_conn->rx_descs = NULL;
 
-free_login_buf:
 	iser_free_login_buf(iser_conn);
 }
 
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 			device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
 		if (is_rdma_data_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_IN]);
+						 &iser_task->data[ISER_DIR_IN],
+						 DMA_FROM_DEVICE);
 		if (prot_count && is_rdma_prot_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->prot[ISER_DIR_IN]);
+						 &iser_task->prot[ISER_DIR_IN],
+						 DMA_FROM_DEVICE);
 	}
 
 	if (iser_task->dir[ISER_DIR_OUT]) {
 		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
 		if (is_rdma_data_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->data[ISER_DIR_OUT]);
+						 &iser_task->data[ISER_DIR_OUT],
+						 DMA_TO_DEVICE);
 		if (prot_count && is_rdma_prot_aligned)
 			iser_dma_unmap_task_data(iser_task,
-						 &iser_task->prot[ISER_DIR_OUT]);
+						 &iser_task->prot[ISER_DIR_OUT],
+						 DMA_TO_DEVICE);
 	}
 }
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index abce9339333f..341040bf0984 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 }
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-			      struct iser_data_buf *data)
+			      struct iser_data_buf *data,
+			      enum dma_data_direction dir)
 {
 	struct ib_device *dev;
 
 	dev = iser_task->iser_conn->ib_conn.device->ib_device;
-	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+	ib_dma_unmap_sg(dev, data->buf, data->size, dir);
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
 		iser_data_buf_dump(mem, ibdev);
 
 	/* unmap the command data before accessing it */
-	iser_dma_unmap_task_data(iser_task, mem);
+	iser_dma_unmap_task_data(iser_task, mem,
+				 (cmd_dir == ISER_DIR_OUT) ?
+				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	/* allocate copy buf, if we are writing, copy the */
 	/* unaligned scatterlist, dma map the copy */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 695a2704bd43..4065abe28829 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
 /**
  * iser_free_ib_conn_res - release IB related resources
  * @iser_conn: iser connection struct
- * @destroy_device: indicator if we need to try to release
- *      the iser device (only iscsi shutdown and DEVICE_REMOVAL
- *      will use this.
+ * @destroy: indicator if we need to try to release the
+ *      iser device and memory regoins pool (only iscsi
+ *      shutdown and DEVICE_REMOVAL will use this).
  *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is Safe to
 * be invoked multiple times.
 */
 static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
-				  bool destroy_device)
+				  bool destroy)
 {
 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
 	iser_info("freeing conn %p cma_id %p qp %p\n",
 		  iser_conn, ib_conn->cma_id, ib_conn->qp);
 
-	iser_free_rx_descriptors(iser_conn);
-
 	if (ib_conn->qp != NULL) {
 		ib_conn->comp->active_qps--;
 		rdma_destroy_qp(ib_conn->cma_id);
 		ib_conn->qp = NULL;
 	}
 
-	if (destroy_device && device != NULL) {
-		iser_device_try_release(device);
-		ib_conn->device = NULL;
+	if (destroy) {
+		if (iser_conn->rx_descs)
+			iser_free_rx_descriptors(iser_conn);
+
+		if (device != NULL) {
+			iser_device_try_release(device);
+			ib_conn->device = NULL;
+		}
 	}
 }
 
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
 	mutex_unlock(&ig.connlist_mutex);
 
 	mutex_lock(&iser_conn->state_mutex);
+	/* In case we endup here without ep_disconnect being invoked. */
 	if (iser_conn->state != ISER_CONN_DOWN) {
 		iser_warn("iser conn %p state %d, expected state down.\n",
 			  iser_conn, iser_conn->state);
+		iscsi_destroy_endpoint(iser_conn->ep);
 		iser_conn->state = ISER_CONN_DOWN;
 	}
 	/*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 }
 
 static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
-				 bool destroy_device)
+				 bool destroy)
 {
 	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
 	 * and flush errors.
 	 */
 	iser_disconnected_handler(cma_id);
-	iser_free_ib_conn_res(iser_conn, destroy_device);
+	iser_free_ib_conn_res(iser_conn, destroy);
 	complete(&iser_conn->ib_completion);
 };
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dafb3c531f96..075b19cc78e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -38,7 +38,7 @@
 #define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
 				 ISERT_MAX_CONN)
 
-int isert_debug_level = 0;
+static int isert_debug_level;
 module_param_named(debug_level, isert_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
@@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
 		isert_conn->post_recv_buf_count -= count;
 	} else {
-		isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
+		isert_dbg("Posted %d RX buffers\n", count);
 		isert_conn->conn_rx_desc_head = rx_head;
 	}
 	return ret;
@@ -1351,17 +1351,19 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
 	struct iscsi_conn *conn = isert_conn->conn;
 	u32 payload_length = ntoh24(hdr->dlength);
 	int rc;
-	unsigned char *text_in;
+	unsigned char *text_in = NULL;
 
 	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
 	if (rc < 0)
 		return rc;
 
-	text_in = kzalloc(payload_length, GFP_KERNEL);
-	if (!text_in) {
-		isert_err("Unable to allocate text_in of payload_length: %u\n",
-			  payload_length);
-		return -ENOMEM;
+	if (payload_length) {
+		text_in = kzalloc(payload_length, GFP_KERNEL);
+		if (!text_in) {
+			isert_err("Unable to allocate text_in of payload_length: %u\n",
+				  payload_length);
+			return -ENOMEM;
+		}
 	}
 	cmd->text_in_ptr = text_in;
 
@@ -1434,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = isert_allocate_cmd(conn);
-		if (!cmd)
-			break;
+		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+			if (!cmd)
+				break;
+		} else {
+			cmd = isert_allocate_cmd(conn);
+			if (!cmd)
+				break;
+		}
 		isert_cmd = iscsit_priv_cmd(cmd);
 
 		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 	struct isert_conn *isert_conn = isert_cmd->conn;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct isert_device *device = isert_conn->conn_device;
+	struct iscsi_text_rsp *hdr;
 
 	isert_dbg("Cmd %p\n", isert_cmd);
 
@@ -1698,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 	case ISCSI_OP_REJECT:
 	case ISCSI_OP_NOOP_OUT:
 	case ISCSI_OP_TEXT:
+		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+		/* If the continue bit is on, keep the command alive */
+		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
+			break;
+
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
 			list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 		 * associated cmd->se_cmd needs to be released.
 		 */
 		if (cmd->se_cmd.se_tfo != NULL) {
-			isert_dbg("Calling transport_generic_free_cmd from"
-				  " isert_put_cmd for 0x%02x\n",
+			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
 				  cmd->iscsi_opcode);
 			transport_generic_free_cmd(&cmd->se_cmd, 0);
 			break;
@@ -2275,7 +2288,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	}
 
 	isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-	isert_dbg("conn %p Text Reject\n", isert_conn);
+	isert_dbg("conn %p Text Response\n", isert_conn);
 
 	return isert_post_response(isert_conn, isert_cmd);
 }
@@ -3136,7 +3149,7 @@ accept_wait:
 	spin_lock_bh(&np->np_thread_lock);
 	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
 		spin_unlock_bh(&np->np_thread_lock);
-		isert_dbg("np_thread_state %d for isert_accept_np\n",
+		isert_dbg("np_thread_state %d\n",
 			 np->np_thread_state);
 		/**
 		 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@ static int __init isert_init(void)
 {
 	int ret;
 
-	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+	isert_comp_wq = alloc_workqueue("isert_comp_wq",
+					WQ_UNBOUND | WQ_HIGHPRI, 0);
 	if (!isert_comp_wq) {
 		isert_err("Unable to allocate isert_comp_wq\n");
 		ret = -ENOMEM;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index eb694ddad79f..6e0a477681e9 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess)
 	DECLARE_COMPLETION_ONSTACK(release_done);
 	struct srpt_rdma_ch *ch;
 	struct srpt_device *sdev;
-	int res;
+	unsigned long res;
 
 	ch = se_sess->fabric_sess_ptr;
 	WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess)
 	spin_unlock_irq(&sdev->spinlock);
 
 	res = wait_for_completion_timeout(&release_done, 60 * HZ);
-	WARN_ON(res <= 0);
+	WARN_ON(res == 0);
 }
 
 /**
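
A note on the srpt_close_session() hunk above: wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and otherwise the number of jiffies remaining, so keeping the result in a signed int and testing "res <= 0" was misleading; only the zero (timeout) case is unexpected. A minimal sketch of the corrected pattern follows; the wrapper function is hypothetical and only the completion calls mirror the patch.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/bug.h>

/* Hypothetical helper, not code from the patch above. */
static void example_wait_for_release(struct completion *release_done)
{
	unsigned long res;

	/* 0 means the 60 second wait timed out; > 0 is the jiffies left */
	res = wait_for_completion_timeout(release_done, 60 * HZ);
	WARN_ON(res == 0);
}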
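Similarly, the iser hunks above thread an explicit enum dma_data_direction through iser_dma_unmap_task_data() so a buffer is always unmapped with the same direction it was mapped with. The sketch below illustrates that pairing with the generic ib_dma_map_sg()/ib_dma_unmap_sg() helpers; the function and its arguments are hypothetical, not code from the patch.

#include <rdma/ib_verbs.h>

/* Hypothetical example: the unmap must use the same direction as the map. */
static void example_map_then_unmap(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir)
{
	int mapped = ib_dma_map_sg(dev, sg, nents, dir);

	if (!mapped)
		return;	/* mapping failed */

	/* ... post work requests that reference the mapped entries ... */

	ib_dma_unmap_sg(dev, sg, nents, dir);
}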