author     Rahul Verma <Rahul.Verma@cavium.com>  2017-07-26 16:07:14 +0300
committer  David S. Miller <davem@davemloft.net>  2017-07-27 10:05:22 +0300
commit     bf5a94bfe26a9fcd4af91ae6bccd4f3d600d2262 (patch)
tree       10444bba0c94198edc3ffd48125b30ceec0d5fd0 /drivers/net/ethernet/qlogic/qed
parent     477f2d1460a636abd08f03eafabe0c51366fa5de (diff)
qed: Read per queue coalesce from hardware
Retrieve the actual coalesce value from hardware for every Rx/Tx queue,
instead of the Rx/Tx coalesce value cached during set coalesce.

Signed-off-by: Rahul Verma <Rahul.Verma@cavium.com>
Signed-off-by: Yuval Mintz <yuval.mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
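The value reported back is reconstructed from two hardware fields, as the qed_l2.c hunk below shows: the CAU status-block entry supplies a timer resolution, and the storm RAM queue zone supplies a coalescing timeset plus a valid bit. The following sketch (not driver code; the bit layout and helper name are illustrative assumptions) shows that reconstruction in isolation:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative only: rebuild a coalesce value (in usec) from a queue-zone
 * word and the CAU timer resolution, mirroring the -EINVAL path the patch
 * takes when the valid bit is clear.
 */
bool example_coalesce_from_hw(uint32_t queue_zone, uint8_t timer_res,
			      uint16_t *coal_usecs)
{
	bool valid = queue_zone & 0x80;		/* assumed valid bit */
	uint32_t timeset = queue_zone & 0x7f;	/* assumed timeset field */

	if (!valid)
		return false;

	*coal_usecs = (uint16_t)(timeset << timer_res);
	return true;
}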
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 115
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 74
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h | 29
8 files changed, 280 insertions, 16 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index e6b3c83c5db8..defdda1ffaa2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -443,13 +443,25 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
/**
- * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx or
- * Tx queue. We can configure coalescing to up to 511, but on
- * varying accuracy [the bigger the value the less accurate] up to a mistake
- * of 3usec for the highest values.
- * While the API allows setting coalescing per-qid, all queues sharing a SB
- * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
- * otherwise configuration would break.
+ * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - store coalesce value read from the hardware.
+ * @param p_handle
+ *
+ * @return int
+ **/
+int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
+
+/**
+ * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * Tx queues. Coalescing can be configured up to 511, but with varying
+ * accuracy [the bigger the value the less accurate], up to an error of
+ * 3usec for the highest values.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * otherwise configuration would break.
+ *
*
* @param rx_coal - Rx Coalesce value in micro seconds.
* @param tx_coal - TX Coalesce value in micro seconds.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 0ba5ec8a9814..9a1645852015 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2047,6 +2047,106 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_rx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ int rc;
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = qed_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return -EINVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_rx_coal = (u16)(coalesce << timer_res);
+
+ return 0;
+}
+
+int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_tx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ int rc;
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = qed_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return -EINVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_tx_coal = (u16)(coalesce << timer_res);
+
+ return 0;
+}
+
+int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
+{
+ struct qed_queue_cid *p_cid = handle;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ if (IS_VF(p_hwfn->cdev)) {
+ rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
+
+ return rc;
+ }
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (p_cid->b_is_rx) {
+ rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc)
+ goto out;
+ } else {
+ rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc)
+ goto out;
+ }
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -2696,6 +2796,20 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
return rc;
}
+static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
+{
+ struct qed_queue_cid *p_cid = handle;
+ struct qed_hwfn *p_hwfn;
+ int rc;
+
+ p_hwfn = p_cid->p_owner;
+ rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
+
+ return rc;
+}
+
static int qed_fp_cqe_completion(struct qed_dev *dev,
u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
@@ -2739,6 +2853,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.tunn_config = &qed_tunn_configure,
.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
.configure_arfs_searcher = &qed_configure_arfs_searcher,
+ .get_coalesce = &qed_get_coalesce,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
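With qed_get_coalesce wired into qed_eth_ops above, a protocol driver that kept the queue handle returned at queue-start time can query the live per-queue value instead of a cached one. A hypothetical consumer might look like the sketch below; the surrounding ethtool plumbing and names such as rxq_handle are assumptions, not part of this patch, and the op prototype is taken from the qed_get_coalesce signature added here.

#include <linux/qed/qed_eth_if.h>

/* Hypothetical caller: reads the coalesce value the hardware is actually
 * using for one queue via the new .get_coalesce eth op.
 */
int example_read_rx_coalesce(struct qed_dev *cdev,
			     const struct qed_eth_ops *ops,
			     void *rxq_handle, u16 *rx_coal)
{
	return ops->get_coalesce(cdev, rx_coal, rxq_handle);
}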
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 60ea72ce3e2c..cc1f248551c9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -407,4 +407,13 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 coalesce, struct qed_queue_cid *p_cid);
-#endif /* _QED_L2_H */
+
+int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_hw_coal);
+
+int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_hw_coal);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 448810a235b8..27832885a87f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1568,12 +1568,6 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
return rc;
}
-static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
-{
- *rx_coal = cdev->rx_coalesce_usecs;
- *tx_coal = cdev->tx_coalesce_usecs;
-}
-
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
void *handle)
{
@@ -1726,7 +1720,6 @@ const struct qed_common_ops qed_common_ops_pass = {
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
.nvm_get_image = &qed_nvm_get_image,
- .get_coalesce = &qed_get_coalesce,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
.update_drv_state = &qed_update_drv_state,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 5feef783623b..3f40b1de7957 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3400,6 +3400,75 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
length, status);
}
+static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+ int rc = 0;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc)
+ goto send_resp;
+ } else {
+ if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((!p_queue->cids[i].p_cid) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
@@ -3450,6 +3519,7 @@ static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
goto out;
}
+ vf->rx_coal = rx_coal;
}
if (tx_coal) {
@@ -3473,6 +3543,7 @@ static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
goto out;
}
}
+ vf->tx_coal = tx_coal;
}
status = PFVF_STATUS_SUCCESS;
@@ -3808,6 +3879,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_UPDATE:
qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_COALESCE_READ:
+ qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
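The PF handler above is the receiving side of a simple two-field exchange: the VF sends a relative queue id plus an Rx/Tx flag, and the PF answers with the coalesce value it read from hardware via qed_get_rxq_coalesce()/qed_get_txq_coalesce(). A stripped-down sketch of the payloads follows; these are simplified local mirrors of the vfpf_read_coal_req_tlv and pfvf_read_coal_resp_tlv structs defined later in qed_vf.h, with TLV headers, padding, and mailbox plumbing elided.

#include <stdint.h>

/* Simplified mirror of the VF -> PF request payload */
struct example_read_coal_req {
	uint16_t qid;	/* VF-relative queue id */
	uint8_t is_rx;	/* 1 = Rx queue, 0 = Tx queue */
};

/* Simplified mirror of the PF -> VF response payload */
struct example_read_coal_resp {
	uint16_t coal;	/* coalesce value, in usec, read by the PF */
};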
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c2e44bce398c..3955929ba892 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -217,6 +217,9 @@ struct qed_vf_info {
u8 num_rxqs;
u8 num_txqs;
+ u16 rx_coal;
+ u16 tx_coal;
+
u8 num_sbs;
u8 num_mac_filters;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 0a7bbc0f19b0..91b5e9f02a62 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1343,6 +1343,37 @@ exit:
return rc;
}
+int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ u16 *p_coal, struct qed_queue_cid *p_cid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_read_coal_resp_tlv *resp;
+ struct vfpf_read_coal_req_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep header tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
+ req->qid = p_cid->rel.queue_id;
+ req->is_rx = p_cid->b_is_rx ? 1 : 0;
+
+ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->read_coal_resp;
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ *p_coal = resp->coal;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
int
qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 2d9fdd62f56d..97d44dfb38ca 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -504,6 +504,20 @@ struct vfpf_update_coalesce {
u16 qid;
u8 padding[2];
};
+
+struct vfpf_read_coal_req_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 qid;
+ u8 is_rx;
+ u8 padding[5];
+};
+
+struct pfvf_read_coal_resp_tlv {
+ struct pfvf_tlv hdr;
+ u16 coal;
+ u8 padding[6];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -517,7 +531,7 @@ union vfpf_tlvs {
struct vfpf_ucast_filter_tlv ucast_filter;
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
- struct channel_list_end_tlv list_end;
+ struct vfpf_read_coal_req_tlv read_coal_req;
struct tlv_buffer_size tlv_buf_size;
};
@@ -527,6 +541,7 @@ union pfvf_tlvs {
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
+ struct pfvf_read_coal_resp_tlv read_coal_resp;
};
enum qed_bulletin_bit {
@@ -634,6 +649,7 @@ enum {
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
+ CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -699,6 +715,17 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 rx_coal,
u16 tx_coal, struct qed_queue_cid *p_cid);
+/**
+ * @brief VF - Get coalesce per VF's relative queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - coalesce value in microseconds for VF queues.
+ * @param p_cid - queue cid
+ *
+ **/
+int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ u16 *p_coal, struct qed_queue_cid *p_cid);
+
#ifdef CONFIG_QED_SRIOV
/**
* @brief Read the VF bulletin and act on it if needed