author     Jason Gunthorpe <jgg@nvidia.com>    2020-08-31 18:28:12 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>    2020-08-31 18:28:12 +0300
commit     6989aa62d342d79d447a9af12477b907d211bebe (patch)
tree       09722ffca5dea6933dec72691f89a8cb09f78656 /drivers/infiniband/hw
parent     7672dac30435b6e28aebaea189ffcb233e61760d (diff)
parent     f75aef392f869018f78cfedf3c320a6b3fcfda6b (diff)
download   linux-6989aa62d342d79d447a9af12477b907d211bebe.tar.xz
Merge tag 'v5.9-rc3' into rdma.git for-next
Required due to dependencies in following patches.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/hw')
40 files changed, 84 insertions, 97 deletions
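
Most of the hunks below come from v5.9-rc3's tree-wide conversion of /* fall through */ comments into the fallthrough; pseudo-keyword, which the kernel defines (in include/linux/compiler_attributes.h) as __attribute__((__fallthrough__)) where the compiler supports it. A minimal stand-alone sketch of that idiom follows; the guarded macro definition and the classify() helper are illustrative stand-ins, not code from this merge:

#include <stdio.h>

/* Stand-in for the kernel's fallthrough macro so this sketch builds alone. */
#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough	__attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough	do {} while (0)
#endif

static int classify(int opcode)
{
	int flags = 0;

	switch (opcode) {
	case 1:			/* e.g. a SEND_WITH_IMM-style opcode */
		flags |= 0x1;	/* record the immediate-data flag ...      */
		fallthrough;	/* ... then share the plain SEND handling  */
	case 0:			/* e.g. a plain SEND opcode */
		flags |= 0x2;
		break;
	default:
		break;
	}
	return flags;
}

int main(void)
{
	printf("%#x\n", classify(1));	/* prints 0x3 */
	return 0;
}

Unlike the comment convention, which only GCC recognized heuristically, the statement attribute satisfies -Wimplicit-fallthrough on both GCC and Clang, which is why these hunks replace the comments rather than merely reword them.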
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 3f18efc0c297..5ee272d27aaa 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2657,7 +2657,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, default: break; } - /* fall through */ + fallthrough; case IB_WR_SEND_WITH_INV: rc = bnxt_re_build_send_wqe(qp, wr, &wqe); break; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index dad0df8a2467..17ac8b7c5710 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -821,7 +821,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 117b42349a28..d60e3dcea087 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1779,7 +1779,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, break; } - /* fall thru */ + fallthrough; case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: { diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 77bc02a9228e..1f288c73ccfc 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2885,7 +2885,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) case MORIBUND: case CLOSING: stop_ep_timer(ep); - /*FALLTHROUGH*/ + fallthrough; case FPDU_MODE: if (ep->com.qp && ep->com.qp->srq) { srqidx = ABORT_RSS_SRQIDX_G( @@ -3759,7 +3759,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, send_fw_act_open_req(ep, atid); return; } - /* fall through */ + fallthrough; case FW_EADDRINUSE: set_bit(ACT_RETRY_INUSE, &ep->com.history); if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index ac48012c992f..cbddb20c6121 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1165,7 +1165,7 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; } fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE; - /*FALLTHROUGH*/ + fallthrough; case IB_WR_RDMA_WRITE: fw_opcode = FW_RI_RDMA_WRITE_WR; swsqe->opcode = FW_RI_RDMA_WRITE; diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index b12e4665c9ab..4a4ec2397857 100644 --- a/drivers/infiniband/hw/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c @@ -209,7 +209,6 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n) fallthrough; case 1: *dest++ = *src++; - /* fall through */ } } diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 9af82ff933d7..73d197e21730 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: switch (prev->wr.opcode) { case IB_WR_TID_RDMA_WRITE: 
req = wqe_to_tid_req(prev); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 8d92d8d82573..4f1dd916d05f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -65,8 +65,6 @@ #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 #define HNS_ROCE_MIN_CQE_CNT 16 -#define HNS_ROCE_RESERVED_SGE 1 - #define HNS_ROCE_MAX_IRQ_NUM 128 #define HNS_ROCE_SGE_IN_WQE 2 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 07b4c85d341d..aeb3a6fa7d47 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -535,7 +535,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept, roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val); dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep); - dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n", + dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n", ext_sdb_alept, ext_sdb_alful); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 4da553d435e4..96e08b45c4a9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -633,7 +633,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); - if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) { + if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n", wr->num_sge, hr_qp->rq.max_gs); ret = -EINVAL; @@ -653,7 +653,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, if (wr->num_sge < hr_qp->rq.max_gs) { dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); dseg->addr = 0; - dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); } /* rq support inline data */ @@ -787,8 +786,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, } if (wr->num_sge < srq->max_gs) { - dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); - dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); + dseg[i].len = 0; + dseg[i].lkey = cpu_to_le32(0x100); dseg[i].addr = 0; } @@ -5079,7 +5078,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) attr->srq_limit = limit_wl; attr->max_wr = srq->wqe_cnt - 1; - attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE; + attr->max_sge = srq->max_gs; out: hns_roce_free_cmd_mailbox(hr_dev, mailbox); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 1fb1c583d0f8..ac29be43b6bd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -92,9 +92,7 @@ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 -#define HNS_ROCE_INVALID_LKEY 0x0 -#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 - +#define HNS_ROCE_INVALID_LKEY 0x100 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index bb87e5fc7e63..975281f03468 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -386,8 +386,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, return -EINVAL; } - hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + - 
HNS_ROCE_RESERVED_SGE); + hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); @@ -402,7 +401,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, hr_qp->rq_inl_buf.wqe_cnt = 0; cap->max_recv_wr = cnt; - cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE; + cap->max_recv_sge = hr_qp->rq.max_gs; return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index f40a000e94ee..b9e2dbd372b6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, spin_lock_init(&srq->lock); srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1); - srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE; + srq->max_gs = init_attr->attr.max_sge; if (udata) { ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index fa7a5ff498c7..a3b95805c154 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2443,7 +2443,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node, case I40IW_CM_STATE_FIN_WAIT1: case I40IW_CM_STATE_LAST_ACK: cm_node->cm_id->rem_ref(cm_node->cm_id); - /* fall through */ + fallthrough; case I40IW_CM_STATE_TIME_WAIT: cm_node->state = I40IW_CM_STATE_CLOSED; i40iw_rem_ref_cm_node(cm_node); diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 688f19667221..86d3f8aff329 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -1964,7 +1964,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq, info->out_rdrsp = true; break; case I40IW_AE_SOURCE_RSVD: - /* fallthrough */ default: break; } @@ -3762,14 +3761,14 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 56, info->entry[2].data); - /* fallthrough */ + fallthrough; case 2: set_64bit_val(wqe, 32, (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) | LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 40, info->entry[1].data); - /* fallthrough */ + fallthrough; case 1: set_64bit_val(wqe, 0, LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD)); diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index ae8b97c30665..e1085634b8d9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -353,7 +353,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) i40iw_cm_disconn(iwqp); break; case I40IW_AE_BAD_CLOSE: - /* fall through */ case I40IW_AE_RESET_SENT: i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0); i40iw_cm_disconn(iwqp); @@ -413,7 +412,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG: case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT: ctx_info->err_rq_idx_valid = false; - /* fall through */ + fallthrough; default: if (!info->sq && ctx_info->err_rq_idx_valid) { ctx_info->err_rq_idx = info->wqe_idx; diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 9c96ece5e7f3..58a433135a03 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ 
-1489,36 +1489,35 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) iwdev->iw_status = 0; i40iw_port_ibevent(iwdev); i40iw_destroy_rdma_device(iwdev->iwibdev); - /* fallthrough */ + fallthrough; case IP_ADDR_REGISTERED: if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); - /* fallthrough */ - /* fallthrough */ + fallthrough; case PBLE_CHUNK_MEM: i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); - /* fallthrough */ + fallthrough; case CEQ_CREATED: i40iw_dele_ceqs(iwdev); - /* fallthrough */ + fallthrough; case AEQ_CREATED: i40iw_destroy_aeq(iwdev); - /* fallthrough */ + fallthrough; case IEQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); - /* fallthrough */ + fallthrough; case ILQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); - /* fallthrough */ + fallthrough; case CCQ_CREATED: i40iw_destroy_ccq(iwdev); - /* fallthrough */ + fallthrough; case HMC_OBJS_CREATED: i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); - /* fallthrough */ + fallthrough; case CQP_CREATED: i40iw_destroy_cqp(iwdev, true); - /* fallthrough */ + fallthrough; case INITIAL_STATE: i40iw_cleanup_cm_core(&iwdev->cm_core); if (iwdev->vsi.pestat) { @@ -1528,7 +1527,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) i40iw_del_init_mem(iwdev); break; case INVALID_STATE: - /* fallthrough */ default: i40iw_pr_err("bad init_state = %d\n", iwdev->init_state); break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index d9c7ae6a7030..924be4b03c9a 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -814,13 +814,13 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi, switch (rsrc->completion) { case PUDA_HASH_CRC_COMPLETE: i40iw_free_hash_desc(rsrc->hash_desc); - /* fall through */ + fallthrough; case PUDA_QP_CREATED: if (!reset) i40iw_puda_free_qp(rsrc); i40iw_free_dma_mem(dev->hw, &rsrc->qpmem); - /* fallthrough */ + fallthrough; case PUDA_CQ_CREATED: if (!reset) i40iw_puda_free_cq(rsrc); diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 016524683e17..e07fb37af086 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -190,9 +190,8 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: /* Just skip if no need to handle ARP cache */ @@ -247,9 +246,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: i40iw_manage_arp_cache(iwdev, netdev->dev_addr, @@ -344,7 +342,7 @@ int i40iw_netdevice_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: iwdev->iw_status = 0; - /* Fall through */ + fallthrough; case NETDEV_UP: i40iw_port_ibevent(iwdev); break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 6957e4f3404b..b51339328a51 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -810,7 +810,7 @@ void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, case I40IW_QP_STATE_RTS: if (iwqp->iwarp_state == 
I40IW_QP_STATE_IDLE) i40iw_send_reset(iwqp->cm_node); - /* fall through */ + fallthrough; case I40IW_QP_STATE_IDLE: case I40IW_QP_STATE_TERMINATE: case I40IW_QP_STATE_CLOSING: @@ -2144,7 +2144,6 @@ static int i40iw_post_send(struct ib_qp *ibqp, switch (ib_wr->opcode) { case IB_WR_SEND: - /* fall-through */ case IB_WR_SEND_WITH_INV: if (ib_wr->opcode == IB_WR_SEND) { if (ib_wr->send_flags & IB_SEND_SOLICITED) @@ -2201,7 +2200,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, break; case IB_WR_RDMA_READ_WITH_INV: inv_stag = true; - /* fall-through*/ + fallthrough; case IB_WR_RDMA_READ: if (ib_wr->num_sge > I40IW_MAX_SGE_RD) { err = -EINVAL; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index f8b936b76dcd..8a3436994f80 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -765,13 +765,13 @@ repoll: switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { case MLX4_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX4_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_SEND: case MLX4_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index d844831179cf..5e4ec9786081 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -944,7 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, switch (sa_mad->mad_hdr.method) { case IB_MGMT_METHOD_SET: may_create = 1; - /* fall through */ + fallthrough; case IB_SA_METHOD_DELETE: req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index f9ca6e000a81..2975f350b9fd 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1578,12 +1578,12 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, pd = to_mxrcd(init_attr->xrcd)->pd; xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; - /* fall through */ + fallthrough; case IB_QPT_XRC_INI: if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return ERR_PTR(-ENOSYS); init_attr->recv_cq = init_attr->send_cq; - /* fall through */ + fallthrough; case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: @@ -1592,7 +1592,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, return ERR_PTR(-ENOMEM); qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; - /* fall through */ + fallthrough; case IB_QPT_UD: { err = create_qp_common(pd, init_attr, udata, 0, &qp); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 0133ebb8d740..dceb0eb2bed1 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -121,13 +121,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { case MLX5_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX5_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_SEND: case MLX5_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 454ce5de2de7..9bb9bb058932 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ 
b/drivers/infiniband/hw/mlx5/mad.c @@ -250,9 +250,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, if (MLX5_CAP_GEN(dev->mdev, vport_counters) && method == IB_MGMT_METHOD_GET) return process_pma_cmd(dev, port_num, in, out); - /* fallthrough */ + fallthrough; case MLX5_IB_VENDOR_CLASS1: - /* fallthrough */ case MLX5_IB_VENDOR_CLASS2: case IB_MGMT_CLASS_CONG_MGMT: { if (method != IB_MGMT_METHOD_GET && diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fbc45a5e76c5..d60d63221b14 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2872,7 +2872,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) break; case MLX5_EVENT_TYPE_GENERAL_EVENT: handle_general_event(ibdev, work->param, &ibev); - /* fall through */ + fallthrough; default: goto out; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e51df89e4440..ee7265fc9237 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -416,7 +416,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) switch (attr->qp_type) { case IB_QPT_XRC_INI: size += sizeof(struct mlx5_wqe_xrc_seg); - /* fall through */ + fallthrough; case IB_QPT_RC: size += sizeof(struct mlx5_wqe_ctrl_seg) + max(sizeof(struct mlx5_wqe_atomic_seg) + @@ -441,7 +441,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) size += sizeof(struct mlx5_wqe_eth_pad) + sizeof(struct mlx5_wqe_eth_seg); - /* fall through */ + fallthrough; case IB_QPT_SMI: case MLX5_IB_QPT_HW_GSI: size += sizeof(struct mlx5_wqe_ctrl_seg) + diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index 0823c0bc7e73..f051f4e06b53 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -115,7 +115,7 @@ static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate) switch ((cur_rate - 1) / req_rate) { case 0: return MTHCA_RATE_MEMFREE_FULL; case 1: return MTHCA_RATE_MEMFREE_HALF; - case 2: /* fall through */ + case 2: case 3: return MTHCA_RATE_MEMFREE_QUARTER; default: return MTHCA_RATE_MEMFREE_EIGHTH; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 6cdbec13756a..c1751c9a0f62 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2134,7 +2134,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_SEND_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_SEND: hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); ocrdma_build_send(qp, hdr, wr); @@ -2148,7 +2148,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_RDMA_WRITE_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_RDMA_WRITE: hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); status = ocrdma_build_write(qp, hdr, wr); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4ce4e2eef6cc..b49bef94637e 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3528,7 +3528,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; case IB_WR_RDMA_READ_WITH_INV: SET_FIELD2(wqe->flags, 
RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1); - /* fallthrough -- same is identical to RDMA READ */ + fallthrough; /* same is identical to RDMA READ */ case IB_WR_RDMA_READ: wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index ca5ea734e3d0..44150be215bf 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2973,11 +2973,11 @@ static u32 qib_6120_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_6120_L_STATE_ACTIVE: - /* fall through */ case IB_6120_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_6120_L_STATE_DOWN: state = IB_PORT_DOWN; break; diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index ea3ddb05cbad..0a6f26d4cb31 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3586,11 +3586,11 @@ static u32 qib_7220_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7220_L_STATE_ACTIVE: - /* fall through */ case IB_7220_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7220_L_STATE_DOWN: state = IB_PORT_DOWN; break; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 8bcbc884e5b6..a10eab89aee4 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5508,11 +5508,11 @@ static u32 qib_7322_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7322_L_STATE_ACTIVE: - /* fall through */ case IB_7322_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7322_L_STATE_DOWN: state = IB_PORT_DOWN; break; @@ -6533,7 +6533,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) "Invalid num_vls %u, using 4 VLs\n", qib_num_cfg_vls); qib_num_cfg_vls = 4; - /* fall through */ + fallthrough; case 4: ppd->vls_supported = IB_VL_VL0_3; break; diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 79bb83222e8d..e7789e724f56 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -433,7 +433,7 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) /* Bad mkey not a violation below level 2 */ if (ibp->rvp.mkeyprot < 2) break; - /* fall through */ + fallthrough; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: if (ibp->rvp.mkey_violations != 0xFFFF) @@ -828,7 +828,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, case IB_PORT_NOP: if (lstate == 0) break; - /* FALLTHROUGH */ + fallthrough; case IB_PORT_DOWN: if (lstate == 0) lstate = QIB_IB_LINKDOWN_ONLY; @@ -1928,7 +1928,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -1962,7 +1962,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -2322,7 +2322,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_get_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; 
ret = reply((struct ib_smp *) ccp); @@ -2339,7 +2339,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_set_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index aaf7438258fa..3915e5b4a9bc 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -83,7 +83,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } - /* FALLTHROUGH */ + fallthrough; case OP(ATOMIC_ACKNOWLEDGE): /* * We can increment the tail pointer now that the last @@ -92,7 +92,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, */ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) qp->s_tail_ack_queue = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ @@ -149,7 +149,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, case OP(RDMA_READ_RESPONSE_FIRST): qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_READ_RESPONSE_MIDDLE): qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; @@ -471,10 +471,10 @@ no_flow_control: * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -510,10 +510,10 @@ no_flow_control: * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -1807,7 +1807,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, if (!ret) goto rnr_nak; qp->r_rcv_len = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): case OP(RDMA_WRITE_MIDDLE): send_middle: @@ -1839,7 +1839,7 @@ send_middle: qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto no_immediate_data; - /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */ + fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */ case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 99e11c347130..8f8d61736656 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -763,7 +763,7 @@ void __qib_sdma_process_event(struct qib_pportdata *ppd, * bringing the link up with traffic active on * 7220, e.g. 
*/ ss->go_s99_running = 1; - /* fall through -- and start dma engine */ + fallthrough; /* and start dma engine */ case qib_sdma_event_e10_go_hw_start: /* This reference means the state machine is started */ sdma_get(&ppd->sdma_state); diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index e17b91e2c22a..554af4273a13 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -161,7 +161,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -185,7 +185,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -351,7 +351,7 @@ send_first: goto no_immediate_data; else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) goto send_last_imm; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) @@ -440,7 +440,7 @@ rdma_first: wc.ex.imm_data = ohdr->u.rc.imm_data; goto rdma_last_imm; } - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 7acf9ba5358a..f6c01bad5a74 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -237,7 +237,7 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, case IB_QPT_GSI: if (ib_qib_disable_sma) break; - /* FALLTHROUGH */ + fallthrough; case IB_QPT_UD: qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); break; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 515460c4212d..462ed71abf53 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -120,7 +120,7 @@ static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev) IB_QPS_ERR, NULL); if (status) { - usnic_err("Failed to transistion qp grp %u from %s to %s\n", + usnic_err("Failed to transition qp grp %u from %s to %s\n", qp_grp->grp_id, usnic_ib_qp_grp_state_to_string (cur_state), diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index afcc2abcf55c..9a8f2a9507be 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -238,7 +238,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, ret = -EINVAL; goto err_qp; } - /* fall through */ + fallthrough; case IB_QPT_RC: case IB_QPT_UD: qp = kzalloc(sizeof(*qp), GFP_KERNEL); |