Diffstat (limited to 'drivers/infiniband/hw/i40iw/i40iw_ctrl.c')
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 62 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 42ca5346777d..d88c6cf47cf2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -348,7 +348,10 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
 	u16 qs_handle;
 	int i;
 
-	vsi->mss = l2params->mss;
+	if (vsi->mtu != l2params->mtu) {
+		vsi->mtu = l2params->mtu;
+		i40iw_reinitialize_ieq(dev);
+	}
 
 	i40iw_fill_qos_list(l2params->qs_handle_list);
 	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -374,7 +377,7 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
  * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
  * @qp: qp to be removed from qos
  */
-static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
 {
 	struct i40iw_sc_vsi *vsi = qp->vsi;
 	unsigned long flags;
@@ -479,6 +482,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
 	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
 	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
 	cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);	/* for the cqp commands backlog. */
+
+	i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
+	i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
 
 	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
 		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
@@ -1774,6 +1781,53 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
 	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
 	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
 	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+
+	switch (info->ae_id) {
+	case I40IW_AE_PRIV_OPERATION_DENIED:
+	case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+	case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+	case I40IW_AE_BAD_CLOSE:
+	case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+	case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
+	case I40IW_AE_STAG_ZERO_INVALID:
+	case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+	case I40IW_AE_WQE_UNEXPECTED_OPCODE:
+	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+	case I40IW_AE_DDP_UBE_INVALID_MO:
+	case I40IW_AE_DDP_UBE_INVALID_QN:
+	case I40IW_AE_DDP_NO_L_BIT:
+	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+	case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+	case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+	case I40IW_AE_INVALID_ARP_ENTRY:
+	case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+	case I40IW_AE_STALE_ARP_ENTRY:
+	case I40IW_AE_LLP_CLOSE_COMPLETE:
+	case I40IW_AE_LLP_CONNECTION_RESET:
+	case I40IW_AE_LLP_FIN_RECEIVED:
+	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+	case I40IW_AE_LLP_SYN_RECEIVED:
+	case I40IW_AE_LLP_TERMINATE_RECEIVED:
+	case I40IW_AE_LLP_TOO_MANY_RETRIES:
+	case I40IW_AE_LLP_DOUBT_REACHABILITY:
+	case I40IW_AE_RESET_SENT:
+	case I40IW_AE_TERMINATE_SENT:
+	case I40IW_AE_RESET_NOT_SENT:
+	case I40IW_AE_LCE_QP_CATASTROPHIC:
+	case I40IW_AE_QP_SUSPEND_COMPLETE:
+		info->qp = true;
+		info->compl_ctx = compl_ctx;
+		ae_src = I40IW_AE_SOURCE_RSVD;
+		break;
+	case I40IW_AE_LCE_CQ_CATASTROPHIC:
+		info->cq = true;
+		info->compl_ctx = LS_64_1(compl_ctx, 1);
+		ae_src = I40IW_AE_SOURCE_RSVD;
+		break;
+	}
+
 	switch (ae_src) {
 	case I40IW_AE_SOURCE_RQ:
 	case I40IW_AE_SOURCE_RQ_0011:
@@ -1807,6 +1861,8 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
 		info->compl_ctx = compl_ctx;
 		info->out_rdrsp = true;
 		break;
+	case I40IW_AE_SOURCE_RSVD:
+		/* fallthrough */
 	default:
 		break;
 	}
@@ -2357,7 +2413,6 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
 	qp->rcv_tph_en = info->rcv_tph_en;
 	qp->xmit_tph_en = info->xmit_tph_en;
 	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
-	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
 
 	return 0;
 }
@@ -2399,7 +2454,6 @@ static enum i40iw_status_code i40iw_sc_qp_create(
 	      LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
 	      LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
 	      LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
-	      LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
 	      LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
 	      LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
 	      LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
@@ -2462,7 +2516,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 	      LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
 	      LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
 	      LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
-	      LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
 	      LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
 	      LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
 	      LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
@@ -2694,7 +2747,7 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
 		  LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
 		  LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
 		  LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
-		  LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+		  LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
 
 	if (info->iwarp_info_valid) {
 		qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
@@ -4376,10 +4429,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
 		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
 				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
 		break;
-	case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
-		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
-				    (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
-		break;
 	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
 		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
 				    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
@@ -4395,7 +4444,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
 				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
 		break;
 	case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
-	case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
 		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
 				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
 		break;
@@ -4541,7 +4589,8 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
 	vsi->dev = info->dev;
 	vsi->back_vsi = info->back_vsi;
 
-	vsi->mss = info->params->mss;
+	vsi->mtu = info->params->mtu;
+	vsi->exception_lan_queue = info->exception_lan_queue;
 
 	i40iw_fill_qos_list(info->params->qs_handle_list);
 	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -4873,6 +4922,7 @@ enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40
 
 	vsi->pestat = info->pestat;
 	vsi->pestat->hw = vsi->dev->hw;
+	vsi->pestat->vsi = vsi;
 
 	if (info->stats_initialize) {
 		i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
@@ -5018,14 +5068,12 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
 	u8 db_size;
 
 	spin_lock_init(&dev->cqp_lock);
-	INIT_LIST_HEAD(&dev->cqp_cmd_head);	/* for the cqp commands backlog. */
 
 	i40iw_device_init_uk(&dev->dev_uk);
 
 	dev->debug_mask = info->debug_mask;
 
 	dev->hmc_fn_id = info->hmc_fn_id;
-	dev->exception_lan_queue = info->exception_lan_queue;
 	dev->is_pf = info->is_pf;
 	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
 
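The functional core of the first hunk is the switch from a per-VSI MSS to a per-VSI MTU, with the inline exception queue (IEQ) rebuilt only when the MTU actually changes. Below is a minimal user-space sketch of that "update and reinitialize only on change" guard; the demo_* types and functions are simplified stand-ins for illustration, not the i40iw driver's real definitions.

/*
 * Illustration only: mirrors the pattern added to i40iw_change_l2params(),
 * where the IEQ is reinitialized only when the MTU value really changed.
 * The structures and helper below are hypothetical stand-ins.
 */
#include <stdio.h>

struct demo_vsi {
	unsigned int mtu;		/* stand-in for i40iw_sc_vsi::mtu */
};

struct demo_l2params {
	unsigned int mtu;		/* stand-in for i40iw_l2params::mtu */
};

/* stand-in for i40iw_reinitialize_ieq() */
static void demo_reinitialize_ieq(void)
{
	printf("IEQ reinitialized for new MTU\n");
}

static void demo_change_l2params(struct demo_vsi *vsi,
				 const struct demo_l2params *l2params)
{
	/* Only rebuild the IEQ when the MTU actually changed. */
	if (vsi->mtu != l2params->mtu) {
		vsi->mtu = l2params->mtu;
		demo_reinitialize_ieq();
	}
}

int main(void)
{
	struct demo_vsi vsi = { .mtu = 1500 };
	struct demo_l2params l2p = { .mtu = 9000 };

	demo_change_l2params(&vsi, &l2p);	/* MTU changed: triggers reinit */
	demo_change_l2params(&vsi, &l2p);	/* MTU unchanged: no-op */
	return 0;
}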